Column     Type          Value stats
---------  ------------  --------------------
file_name  large_string  lengths 4 to 140
prefix     large_string  lengths 0 to 12.1k
suffix     large_string  lengths 0 to 12k
middle     large_string  lengths 0 to 7.51k
fim_type   large_string  4 classes
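Each row that follows is one fill-in-the-middle (FIM) sample: file_name names the source file, prefix and suffix are the text before and after a masked span, middle is the masked span itself, and fim_type labels how the span was chosen (identifier_name, identifier_body, random_line_split, or conditional_block). A minimal Go sketch of that relationship, using values from the first run.go row below; the FIMSample struct and Reconstruct helper are illustrative only, not part of any dataset tooling.

package main

import "fmt"

// FIMSample mirrors the five columns listed above. The struct itself is a
// hypothetical helper for illustration only.
type FIMSample struct {
	FileName string // e.g. "run.go"
	Prefix   string // text before the masked span
	Suffix   string // text after the masked span
	Middle   string // the masked span a model is asked to fill in
	FIMType  string // identifier_name, identifier_body, random_line_split, or conditional_block
}

// Reconstruct rebuilds the original source text covered by one sample.
func Reconstruct(s FIMSample) string {
	return s.Prefix + s.Middle + s.Suffix
}

func main() {
	// Values taken from the first run.go row below (an identifier_name sample).
	s := FIMSample{
		FileName: "run.go",
		Prefix:   "func (g *Group) ",
		Middle:   "Register",
		Suffix:   "(units ...Unit) []bool {",
		FIMType:  "identifier_name",
	}
	fmt.Println(Reconstruct(s)) // func (g *Group) Register(units ...Unit) []bool {
}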
run.go
func NewFlagSet(name string) *FlagSet { return &FlagSet{ FlagSet: pflag.NewFlagSet(name, pflag.ContinueOnError), Name: name, } } // Unit is the default interface an object needs to implement for it to be able // to register with a Group. // Name should return a short but good identifier of the Unit. type Unit interface { Name() string } // Config interface should be implemented by Group Unit objects that manage // their own configuration through the use of flags. // If a Unit's Validate returns an error it will stop the Group immediately. type Config interface { // Unit for Group registration and identification Unit // FlagSet returns an object's FlagSet FlagSet() *FlagSet // Validate checks an object's stored values Validate() error } // PreRunner interface should be implemented by Group Unit objects that need // a pre run stage before starting the Group Services. // If a Unit's PreRun returns an error it will stop the Group immediately. type PreRunner interface { // Unit for Group registration and identification Unit PreRun() error } // NewPreRunner takes a name and a standalone pre runner compatible function // and turns them into a Group compatible PreRunner, ready for registration. func NewPreRunner(name string, fn func() error) PreRunner { return preRunner{name: name, fn: fn} } type preRunner struct { fn func() error name string } func (p preRunner) Name() string { return p.name } func (p preRunner) PreRun() error { return p.fn() } // StopNotify sends the stopped event to the running system. type StopNotify <-chan struct{} // Service interface should be implemented by Group Unit objects that need // to run a blocking service until an error occurs or a shutdown request is // made. // The Serve method must be blocking and return an error on unexpected shutdown. // Recoverable errors need to be handled inside the service itself. // GracefulStop must gracefully stop the service and make the Serve call return. // // Since Service is managed by Group, it is considered a design flaw to call any // of the Service methods directly in application code. type Service interface { // Unit for Group registration and identification Unit // Serve starts the GroupService and blocks. Serve() StopNotify // GracefulStop shuts down and cleans up the GroupService. GracefulStop() } // Group builds on https://github.com/oklog/run to provide a deterministic way // to manage service lifecycles. It allows for easy composition of elegant // monoliths as well as adding signal handlers, metrics services, etc. type Group struct { f *FlagSet readyCh chan struct{} log *logger.Logger name string r run.Group c []Config p []PreRunner s []Service showRunGroup bool configured bool } // NewGroup return a Group with input name. func NewGroup(name string) Group { return Group{ name: name, readyCh: make(chan struct{}), } } // Name shows the name of the group. func (g Group) Name() string { return g.name } // Register will inspect the provided objects implementing the Unit interface to // see if it needs to register the objects for any of the Group bootstrap // phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored // by Group. // The returned array of booleans is of the same size as the amount of provided // Units, signaling for each provided Unit if it successfully registered with // Group for at least one of the bootstrap phases or if it was ignored. func (g *Group)
(units ...Unit) []bool { g.log = logger.GetLogger(g.name) hasRegistered := make([]bool, len(units)) for idx := range units { if !g.configured { // if RunConfig has been called we can no longer register Config // phases of Units if c, ok := units[idx].(Config); ok { g.c = append(g.c, c) hasRegistered[idx] = true } } if p, ok := units[idx].(PreRunner); ok { g.p = append(g.p, p) hasRegistered[idx] = true } if s, ok := units[idx].(Service); ok { g.s = append(g.s, s) hasRegistered[idx] = true } } return hasRegistered } // RegisterFlags returns FlagSet contains Flags in all modules. func (g *Group) RegisterFlags() *FlagSet { // run configuration stage g.f = NewFlagSet(g.name) g.f.SortFlags = false // keep order of flag registration g.f.Usage = func() { fmt.Printf("Flags:\n") g.f.PrintDefaults() } gFS := NewFlagSet("Common Service options") gFS.SortFlags = false gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`) gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units") g.f.AddFlagSet(gFS.FlagSet) // register flags from attached Config objects fs := make([]*FlagSet, len(g.c)) for idx := range g.c { // a Namer might have been deregistered if g.c[idx] == nil { continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags") fs[idx] = g.c[idx].FlagSet() if fs[idx] == nil { // no FlagSet returned g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset") continue } fs[idx].VisitAll(func(f *pflag.Flag) { if g.f.Lookup(f.Name) != nil { // log duplicate flag g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag") return } g.f.AddFlag(f) }) } return g.f } // RunConfig runs the Config phase of all registered Config aware Units. // Only use this function if needing to add additional wiring between config // and (pre)run phases and a separate PreRunner phase is not an option. // In most cases it is best to use the Run method directly as it will run the // Config phase prior to executing the PreRunner and Service phases. // If an error is returned the application must shut down as it is considered // fatal. func (g *Group) RunConfig() (interrupted bool, err error) { g.log = logger.GetLogger(g.name) g.configured = true if g.name == "" { // use the binary name if custom name has not been provided g.name = path.Base(os.Args[0]) } defer func() { if err != nil { g.log.Error().Err(err).Msg("unexpected exit") } }() // Load config from env and file if err = config.Load(g.f.Name, g.f.FlagSet); err != nil { return false, errors.Wrapf(err, "%s fails to load config", g.f.Name) } // bail early on help or version requests switch { case g.showRunGroup: fmt.Println(g.ListUnits()) return true, nil } // Validate Config inputs for idx := range g.c { // a Config might have been deregistered during Run if g.c[idx] == nil { g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate") continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config") if vErr := g.c[idx].Validate(); vErr != nil { err = multierr.Append(err, vErr) } } // exit on at least one Validate error if err != nil { return false, err } // log binary name and version g.log.Info().Msg("started") return false, nil } // Run will execute all phases of all registered Units and block until an error // occurs. 
// If RunConfig has been called prior to Run, the Group's Config phase will be // skipped and Run continues with the PreRunner and Service phases. // // The following phases are executed in the following sequence: // // Config phase (serially, in order of Unit registration) // - FlagSet() Get & register all FlagSets from Config Units. // - Flag Parsing Using the provided args (os.Args if empty) // - Validate() Validate Config Units. Exit on first error. // // PreRunner phase (serially, in order of Unit registration) // - PreRun() Execute PreRunner Units. Exit on first error. // // Service phase (concurrently) // - Serve() Execute all Service Units in separate Go routines. // - Wait Block until one of the Serve() methods returns // - GracefulStop()
Register
identifier_name
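The run.go rows reproduce a small service-lifecycle package built around Group, with Unit, Config, PreRunner, and Service bootstrap phases. A hedged usage sketch of that API, assuming it is importable under the hypothetical path example.com/rungroup and that a do-nothing demo service is enough to show registration; neither assumption comes from the rows themselves.

package main

import (
	"log"

	"example.com/rungroup" // hypothetical import path for the package in the run.go rows
)

// demoService is an illustrative Service: Serve hands back a channel that
// GracefulStop closes, which makes the Group's run loop return.
type demoService struct {
	stop chan struct{}
}

func (d *demoService) Name() string { return "demo" }

func (d *demoService) Serve() rungroup.StopNotify {
	// A real service would start its work (e.g. in a goroutine) before
	// returning the channel it will close on shutdown.
	return d.stop
}

func (d *demoService) GracefulStop() { close(d.stop) }

func main() {
	g := rungroup.NewGroup("demo-app")

	// Register inspects each Unit and slots it into the Config, PreRunner,
	// and/or Service phases it implements.
	g.Register(
		rungroup.NewPreRunner("warmup", func() error { return nil }),
		&demoService{stop: make(chan struct{})},
	)

	// Collect flags from Config units before running the phases.
	g.RegisterFlags()

	// Run executes the Config and PreRunner phases in order, then blocks
	// on the Service phase until a service stops or an error occurs.
	if err := g.Run(); err != nil {
		log.Fatal(err)
	}
}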
run.go
configured bool } // NewGroup return a Group with input name. func NewGroup(name string) Group { return Group{ name: name, readyCh: make(chan struct{}), } } // Name shows the name of the group. func (g Group) Name() string { return g.name } // Register will inspect the provided objects implementing the Unit interface to // see if it needs to register the objects for any of the Group bootstrap // phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored // by Group. // The returned array of booleans is of the same size as the amount of provided // Units, signaling for each provided Unit if it successfully registered with // Group for at least one of the bootstrap phases or if it was ignored. func (g *Group) Register(units ...Unit) []bool { g.log = logger.GetLogger(g.name) hasRegistered := make([]bool, len(units)) for idx := range units { if !g.configured { // if RunConfig has been called we can no longer register Config // phases of Units if c, ok := units[idx].(Config); ok { g.c = append(g.c, c) hasRegistered[idx] = true } } if p, ok := units[idx].(PreRunner); ok { g.p = append(g.p, p) hasRegistered[idx] = true } if s, ok := units[idx].(Service); ok { g.s = append(g.s, s) hasRegistered[idx] = true } } return hasRegistered } // RegisterFlags returns FlagSet contains Flags in all modules. func (g *Group) RegisterFlags() *FlagSet { // run configuration stage g.f = NewFlagSet(g.name) g.f.SortFlags = false // keep order of flag registration g.f.Usage = func() { fmt.Printf("Flags:\n") g.f.PrintDefaults() } gFS := NewFlagSet("Common Service options") gFS.SortFlags = false gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`) gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units") g.f.AddFlagSet(gFS.FlagSet) // register flags from attached Config objects fs := make([]*FlagSet, len(g.c)) for idx := range g.c { // a Namer might have been deregistered if g.c[idx] == nil { continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags") fs[idx] = g.c[idx].FlagSet() if fs[idx] == nil { // no FlagSet returned g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset") continue } fs[idx].VisitAll(func(f *pflag.Flag) { if g.f.Lookup(f.Name) != nil { // log duplicate flag g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag") return } g.f.AddFlag(f) }) } return g.f } // RunConfig runs the Config phase of all registered Config aware Units. // Only use this function if needing to add additional wiring between config // and (pre)run phases and a separate PreRunner phase is not an option. // In most cases it is best to use the Run method directly as it will run the // Config phase prior to executing the PreRunner and Service phases. // If an error is returned the application must shut down as it is considered // fatal. 
func (g *Group) RunConfig() (interrupted bool, err error) { g.log = logger.GetLogger(g.name) g.configured = true if g.name == "" { // use the binary name if custom name has not been provided g.name = path.Base(os.Args[0]) } defer func() { if err != nil { g.log.Error().Err(err).Msg("unexpected exit") } }() // Load config from env and file if err = config.Load(g.f.Name, g.f.FlagSet); err != nil { return false, errors.Wrapf(err, "%s fails to load config", g.f.Name) } // bail early on help or version requests switch { case g.showRunGroup: fmt.Println(g.ListUnits()) return true, nil } // Validate Config inputs for idx := range g.c { // a Config might have been deregistered during Run if g.c[idx] == nil { g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate") continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config") if vErr := g.c[idx].Validate(); vErr != nil { err = multierr.Append(err, vErr) } } // exit on at least one Validate error if err != nil { return false, err } // log binary name and version g.log.Info().Msg("started") return false, nil } // Run will execute all phases of all registered Units and block until an error // occurs. // If RunConfig has been called prior to Run, the Group's Config phase will be // skipped and Run continues with the PreRunner and Service phases. // // The following phases are executed in the following sequence: // // Config phase (serially, in order of Unit registration) // - FlagSet() Get & register all FlagSets from Config Units. // - Flag Parsing Using the provided args (os.Args if empty) // - Validate() Validate Config Units. Exit on first error. // // PreRunner phase (serially, in order of Unit registration) // - PreRun() Execute PreRunner Units. Exit on first error. // // Service phase (concurrently) // - Serve() Execute all Service Units in separate Go routines. // - Wait Block until one of the Serve() methods returns // - GracefulStop() Call interrupt handlers of all Service Units. 
// // Run will return with the originating error on: // - first Config.Validate() returning an error // - first PreRunner.PreRun() returning an error // - first Service.Serve() returning (error or nil) func (g *Group) Run() (err error) { // run config registration and flag parsing stages if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil { return errRun } defer func() { if err != nil { g.log.Fatal().Err(err).Stack().Msg("unexpected exit") } }() // execute pre run stage and exit on error for idx := range g.p { // a PreRunner might have been deregistered during Run if g.p[idx] == nil { continue } g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run") if err := g.p[idx].PreRun(); err != nil { return err } } swg := &sync.WaitGroup{} swg.Add(len(g.s)) go func() { swg.Wait() close(g.readyCh) }() // feed our registered services to our internal run.Group for idx := range g.s { // a Service might have been deregistered during Run s := g.s[idx] if s == nil { continue } g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve") g.r.Add(func() error { notify := s.Serve() swg.Done() <-notify return nil }, func(_ error) { g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop") s.GracefulStop() }) } // start registered services and block return g.r.Run() } // ListUnits returns a list of all Group phases and the Units registered to each // of them. func (g Group) ListUnits() string
{ var ( s string t = "cli" ) if len(g.c) > 0 { s += "\n- config: " for _, u := range g.c { if u != nil { s += u.Name() + " " } } } if len(g.p) > 0 { s += "\n- prerun: " for _, u := range g.p { if u != nil { s += u.Name() + " " }
identifier_body
run.go
. func NewFlagSet(name string) *FlagSet { return &FlagSet{ FlagSet: pflag.NewFlagSet(name, pflag.ContinueOnError), Name: name, } } // Unit is the default interface an object needs to implement for it to be able // to register with a Group. // Name should return a short but good identifier of the Unit. type Unit interface { Name() string } // Config interface should be implemented by Group Unit objects that manage // their own configuration through the use of flags. // If a Unit's Validate returns an error it will stop the Group immediately. type Config interface { // Unit for Group registration and identification Unit // FlagSet returns an object's FlagSet FlagSet() *FlagSet // Validate checks an object's stored values Validate() error } // PreRunner interface should be implemented by Group Unit objects that need // a pre run stage before starting the Group Services. // If a Unit's PreRun returns an error it will stop the Group immediately. type PreRunner interface { // Unit for Group registration and identification Unit PreRun() error } // NewPreRunner takes a name and a standalone pre runner compatible function // and turns them into a Group compatible PreRunner, ready for registration. func NewPreRunner(name string, fn func() error) PreRunner { return preRunner{name: name, fn: fn} } type preRunner struct { fn func() error name string } func (p preRunner) Name() string { return p.name } func (p preRunner) PreRun() error { return p.fn() } // StopNotify sends the stopped event to the running system. type StopNotify <-chan struct{} // Service interface should be implemented by Group Unit objects that need // to run a blocking service until an error occurs or a shutdown request is // made. // The Serve method must be blocking and return an error on unexpected shutdown. // Recoverable errors need to be handled inside the service itself. // GracefulStop must gracefully stop the service and make the Serve call return. // // Since Service is managed by Group, it is considered a design flaw to call any // of the Service methods directly in application code. type Service interface { // Unit for Group registration and identification Unit // Serve starts the GroupService and blocks. Serve() StopNotify // GracefulStop shuts down and cleans up the GroupService. GracefulStop() } // Group builds on https://github.com/oklog/run to provide a deterministic way // to manage service lifecycles. It allows for easy composition of elegant // monoliths as well as adding signal handlers, metrics services, etc. type Group struct { f *FlagSet readyCh chan struct{} log *logger.Logger name string r run.Group c []Config p []PreRunner s []Service showRunGroup bool configured bool } // NewGroup return a Group with input name. func NewGroup(name string) Group { return Group{ name: name, readyCh: make(chan struct{}), } } // Name shows the name of the group. func (g Group) Name() string { return g.name } // Register will inspect the provided objects implementing the Unit interface to // see if it needs to register the objects for any of the Group bootstrap // phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored // by Group. // The returned array of booleans is of the same size as the amount of provided // Units, signaling for each provided Unit if it successfully registered with // Group for at least one of the bootstrap phases or if it was ignored. 
func (g *Group) Register(units ...Unit) []bool { g.log = logger.GetLogger(g.name) hasRegistered := make([]bool, len(units)) for idx := range units { if !g.configured { // if RunConfig has been called we can no longer register Config // phases of Units if c, ok := units[idx].(Config); ok { g.c = append(g.c, c) hasRegistered[idx] = true } } if p, ok := units[idx].(PreRunner); ok { g.p = append(g.p, p) hasRegistered[idx] = true } if s, ok := units[idx].(Service); ok { g.s = append(g.s, s) hasRegistered[idx] = true } } return hasRegistered } // RegisterFlags returns FlagSet contains Flags in all modules. func (g *Group) RegisterFlags() *FlagSet { // run configuration stage g.f = NewFlagSet(g.name) g.f.SortFlags = false // keep order of flag registration g.f.Usage = func() { fmt.Printf("Flags:\n") g.f.PrintDefaults() } gFS := NewFlagSet("Common Service options") gFS.SortFlags = false gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`) gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units") g.f.AddFlagSet(gFS.FlagSet) // register flags from attached Config objects fs := make([]*FlagSet, len(g.c)) for idx := range g.c { // a Namer might have been deregistered if g.c[idx] == nil { continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags") fs[idx] = g.c[idx].FlagSet() if fs[idx] == nil { // no FlagSet returned g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset") continue } fs[idx].VisitAll(func(f *pflag.Flag) { if g.f.Lookup(f.Name) != nil { // log duplicate flag g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag") return } g.f.AddFlag(f) }) } return g.f } // RunConfig runs the Config phase of all registered Config aware Units. // Only use this function if needing to add additional wiring between config // and (pre)run phases and a separate PreRunner phase is not an option. // In most cases it is best to use the Run method directly as it will run the // Config phase prior to executing the PreRunner and Service phases. // If an error is returned the application must shut down as it is considered // fatal. func (g *Group) RunConfig() (interrupted bool, err error) { g.log = logger.GetLogger(g.name) g.configured = true if g.name == "" { // use the binary name if custom name has not been provided g.name = path.Base(os.Args[0]) } defer func() { if err != nil { g.log.Error().Err(err).Msg("unexpected exit") } }() // Load config from env and file if err = config.Load(g.f.Name, g.f.FlagSet); err != nil { return false, errors.Wrapf(err, "%s fails to load config", g.f.Name) } // bail early on help or version requests switch {
fmt.Println(g.ListUnits()) return true, nil } // Validate Config inputs for idx := range g.c { // a Config might have been deregistered during Run if g.c[idx] == nil { g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate") continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config") if vErr := g.c[idx].Validate(); vErr != nil { err = multierr.Append(err, vErr) } } // exit on at least one Validate error if err != nil { return false, err } // log binary name and version g.log.Info().Msg("started") return false, nil } // Run will execute all phases of all registered Units and block until an error // occurs. // If RunConfig has been called prior to Run, the Group's Config phase will be // skipped and Run continues with the PreRunner and Service phases. // // The following phases are executed in the following sequence: // // Config phase (serially, in order of Unit registration) // - FlagSet() Get & register all FlagSets from Config Units. // - Flag Parsing Using the provided args (os.Args if empty) // - Validate() Validate Config Units. Exit on first error. // // PreRunner phase (serially, in order of Unit registration) // - PreRun() Execute PreRunner Units. Exit on first error. // // Service phase (concurrently) // - Serve() Execute all Service Units in separate Go routines. // - Wait Block until one of the Serve() methods returns // - GracefulStop()
case g.showRunGroup:
random_line_split
run.go
*FlagSet readyCh chan struct{} log *logger.Logger name string r run.Group c []Config p []PreRunner s []Service showRunGroup bool configured bool } // NewGroup return a Group with input name. func NewGroup(name string) Group { return Group{ name: name, readyCh: make(chan struct{}), } } // Name shows the name of the group. func (g Group) Name() string { return g.name } // Register will inspect the provided objects implementing the Unit interface to // see if it needs to register the objects for any of the Group bootstrap // phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored // by Group. // The returned array of booleans is of the same size as the amount of provided // Units, signaling for each provided Unit if it successfully registered with // Group for at least one of the bootstrap phases or if it was ignored. func (g *Group) Register(units ...Unit) []bool { g.log = logger.GetLogger(g.name) hasRegistered := make([]bool, len(units)) for idx := range units { if !g.configured { // if RunConfig has been called we can no longer register Config // phases of Units if c, ok := units[idx].(Config); ok { g.c = append(g.c, c) hasRegistered[idx] = true } } if p, ok := units[idx].(PreRunner); ok { g.p = append(g.p, p) hasRegistered[idx] = true } if s, ok := units[idx].(Service); ok { g.s = append(g.s, s) hasRegistered[idx] = true } } return hasRegistered } // RegisterFlags returns FlagSet contains Flags in all modules. func (g *Group) RegisterFlags() *FlagSet { // run configuration stage g.f = NewFlagSet(g.name) g.f.SortFlags = false // keep order of flag registration g.f.Usage = func() { fmt.Printf("Flags:\n") g.f.PrintDefaults() } gFS := NewFlagSet("Common Service options") gFS.SortFlags = false gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`) gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units") g.f.AddFlagSet(gFS.FlagSet) // register flags from attached Config objects fs := make([]*FlagSet, len(g.c)) for idx := range g.c { // a Namer might have been deregistered if g.c[idx] == nil { continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags") fs[idx] = g.c[idx].FlagSet() if fs[idx] == nil { // no FlagSet returned g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset") continue } fs[idx].VisitAll(func(f *pflag.Flag) { if g.f.Lookup(f.Name) != nil { // log duplicate flag g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag") return } g.f.AddFlag(f) }) } return g.f } // RunConfig runs the Config phase of all registered Config aware Units. // Only use this function if needing to add additional wiring between config // and (pre)run phases and a separate PreRunner phase is not an option. // In most cases it is best to use the Run method directly as it will run the // Config phase prior to executing the PreRunner and Service phases. // If an error is returned the application must shut down as it is considered // fatal. 
func (g *Group) RunConfig() (interrupted bool, err error) { g.log = logger.GetLogger(g.name) g.configured = true if g.name == "" { // use the binary name if custom name has not been provided g.name = path.Base(os.Args[0]) } defer func() { if err != nil { g.log.Error().Err(err).Msg("unexpected exit") } }() // Load config from env and file if err = config.Load(g.f.Name, g.f.FlagSet); err != nil { return false, errors.Wrapf(err, "%s fails to load config", g.f.Name) } // bail early on help or version requests switch { case g.showRunGroup: fmt.Println(g.ListUnits()) return true, nil } // Validate Config inputs for idx := range g.c { // a Config might have been deregistered during Run if g.c[idx] == nil { g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate") continue } g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config") if vErr := g.c[idx].Validate(); vErr != nil { err = multierr.Append(err, vErr) } } // exit on at least one Validate error if err != nil { return false, err } // log binary name and version g.log.Info().Msg("started") return false, nil } // Run will execute all phases of all registered Units and block until an error // occurs. // If RunConfig has been called prior to Run, the Group's Config phase will be // skipped and Run continues with the PreRunner and Service phases. // // The following phases are executed in the following sequence: // // Config phase (serially, in order of Unit registration) // - FlagSet() Get & register all FlagSets from Config Units. // - Flag Parsing Using the provided args (os.Args if empty) // - Validate() Validate Config Units. Exit on first error. // // PreRunner phase (serially, in order of Unit registration) // - PreRun() Execute PreRunner Units. Exit on first error. // // Service phase (concurrently) // - Serve() Execute all Service Units in separate Go routines. // - Wait Block until one of the Serve() methods returns // - GracefulStop() Call interrupt handlers of all Service Units. 
// // Run will return with the originating error on: // - first Config.Validate() returning an error // - first PreRunner.PreRun() returning an error // - first Service.Serve() returning (error or nil) func (g *Group) Run() (err error) { // run config registration and flag parsing stages if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil { return errRun } defer func() { if err != nil { g.log.Fatal().Err(err).Stack().Msg("unexpected exit") } }() // execute pre run stage and exit on error for idx := range g.p { // a PreRunner might have been deregistered during Run if g.p[idx] == nil { continue } g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run") if err := g.p[idx].PreRun(); err != nil { return err } } swg := &sync.WaitGroup{} swg.Add(len(g.s)) go func() { swg.Wait() close(g.readyCh) }() // feed our registered services to our internal run.Group for idx := range g.s { // a Service might have been deregistered during Run s := g.s[idx] if s == nil { continue } g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve") g.r.Add(func() error { notify := s.Serve() swg.Done() <-notify return nil }, func(_ error) { g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop") s.GracefulStop() }) } // start registered services and block return g.r.Run() } // ListUnits returns a list of all Group phases and the Units registered to each // of them. func (g Group) ListUnits() string { var ( s string t = "cli" ) if len(g.c) > 0
{ s += "\n- config: " for _, u := range g.c { if u != nil { s += u.Name() + " " } } }
conditional_block
test.pb.go
PrintKVRequest_Value `protobuf_oneof:"Value"` } func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} } func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) } func (*PrintKVRequest) ProtoMessage() {} func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type isPrintKVRequest_Value interface { isPrintKVRequest_Value() } type PrintKVRequest_ValueString struct { ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"` } type PrintKVRequest_ValueInt struct { ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"` } func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {} func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {} func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value { if m != nil { return m.Value } return nil } func (m *PrintKVRequest) GetKey() string { if m != nil { return m.Key } return "" } func (m *PrintKVRequest) GetValueString() string
func (m *PrintKVRequest) GetValueInt() int32 { if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok { return x.ValueInt } return 0 } // XXX_OneofFuncs is for the internal use of the proto package. func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{ (*PrintKVRequest_ValueString)(nil), (*PrintKVRequest_ValueInt)(nil), } } func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeStringBytes(x.ValueString) case *PrintKVRequest_ValueInt: b.EncodeVarint(3<<3 | proto.WireVarint) b.EncodeVarint(uint64(x.ValueInt)) case nil: default: return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x) } return nil } func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*PrintKVRequest) switch tag { case 2: // Value.ValueString if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Value = &PrintKVRequest_ValueString{x} return true, err case 3: // Value.ValueInt if wire != proto.WireVarint { return true, proto.ErrInternalBadWireType } x, err := b.DecodeVarint() m.Value = &PrintKVRequest_ValueInt{int32(x)} return true, err default: return false, nil } } func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.ValueString))) n += len(x.ValueString) case *PrintKVRequest_ValueInt: n += proto.SizeVarint(3<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.ValueInt)) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type PrintKVResponse struct { } func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} } func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) } func (*PrintKVResponse) ProtoMessage() {} func (*PrintKVResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func init() { proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest") proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse") proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest") proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion4 // Client API for Test service type TestClient interface { Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) } type testClient struct { cc *grpc.ClientConn } func NewTestClient(cc *grpc.ClientConn) TestClient { return &testClient{cc} } func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) { out := new(TestResponse) err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) { out := new(PrintKVResponse) err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for Test service type TestServer interface { Double(context.Context, *TestRequest) (*TestResponse, error) PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error) } func RegisterTestServer(s *grpc.Server, srv TestServer) { s.RegisterService(&_Test_serviceDesc, srv) } func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TestRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServer).Double(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/Double", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).Double(ctx, req.(*TestRequest)) } return interceptor(ctx, in, info, handler) } func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PrintKVRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServer).PrintKV(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/PrintKV", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest)) } return interceptor(ctx, in, info, handler) } var _Test_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpctest.Test", HandlerType: (*TestServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Double", Handler: _Test_Double_Handler, }, { MethodName: "PrintKV", Handler: _Test_PrintKV_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "test.proto", } func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 240 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17
{ if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok { return x.ValueString } return "" }
identifier_body
test.pb.go
PrintKVRequest_Value `protobuf_oneof:"Value"` } func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} } func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) } func (*PrintKVRequest) ProtoMessage() {} func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type isPrintKVRequest_Value interface { isPrintKVRequest_Value() } type PrintKVRequest_ValueString struct { ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"` } type PrintKVRequest_ValueInt struct { ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"` } func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {} func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {} func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value { if m != nil { return m.Value } return nil } func (m *PrintKVRequest) GetKey() string { if m != nil { return m.Key } return "" } func (m *PrintKVRequest) GetValueString() string { if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok { return x.ValueString } return "" } func (m *PrintKVRequest) GetValueInt() int32 { if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok { return x.ValueInt } return 0 } // XXX_OneofFuncs is for the internal use of the proto package. func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{ (*PrintKVRequest_ValueString)(nil), (*PrintKVRequest_ValueInt)(nil), } } func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeStringBytes(x.ValueString) case *PrintKVRequest_ValueInt: b.EncodeVarint(3<<3 | proto.WireVarint) b.EncodeVarint(uint64(x.ValueInt)) case nil: default: return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x) } return nil } func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*PrintKVRequest) switch tag { case 2: // Value.ValueString if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Value = &PrintKVRequest_ValueString{x} return true, err case 3: // Value.ValueInt if wire != proto.WireVarint { return true, proto.ErrInternalBadWireType } x, err := b.DecodeVarint() m.Value = &PrintKVRequest_ValueInt{int32(x)} return true, err default: return false, nil } } func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.ValueString))) n += len(x.ValueString) case *PrintKVRequest_ValueInt: n += proto.SizeVarint(3<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.ValueInt)) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type PrintKVResponse struct { } func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} } func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) } func (*PrintKVResponse) ProtoMessage() {} func (*PrintKVResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func init() { proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest") 
proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse") proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest") proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Test service type TestClient interface { Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) } type testClient struct { cc *grpc.ClientConn } func NewTestClient(cc *grpc.ClientConn) TestClient { return &testClient{cc} } func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) { out := new(TestResponse) err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) { out := new(PrintKVResponse) err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for Test service type TestServer interface { Double(context.Context, *TestRequest) (*TestResponse, error) PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error) } func RegisterTestServer(s *grpc.Server, srv TestServer) { s.RegisterService(&_Test_serviceDesc, srv) } func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TestRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil
info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/Double", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).Double(ctx, req.(*TestRequest)) } return interceptor(ctx, in, info, handler) } func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PrintKVRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServer).PrintKV(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/PrintKV", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest)) } return interceptor(ctx, in, info, handler) } var _Test_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpctest.Test", HandlerType: (*TestServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Double", Handler: _Test_Double_Handler, }, { MethodName: "PrintKV", Handler: _Test_PrintKV_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "test.proto", } func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 240 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17
{ return srv.(TestServer).Double(ctx, in) }
conditional_block
test.pb.go
isPrintKVRequest_Value `protobuf_oneof:"Value"` } func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} } func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) } func (*PrintKVRequest) ProtoMessage() {} func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type isPrintKVRequest_Value interface { isPrintKVRequest_Value() } type PrintKVRequest_ValueString struct { ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"` } type PrintKVRequest_ValueInt struct { ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"` } func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {} func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {} func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value { if m != nil { return m.Value } return nil } func (m *PrintKVRequest) GetKey() string { if m != nil { return m.Key } return "" } func (m *PrintKVRequest) GetValueString() string { if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok { return x.ValueString } return "" } func (m *PrintKVRequest) GetValueInt() int32 { if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok { return x.ValueInt } return 0 } // XXX_OneofFuncs is for the internal use of the proto package. func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{ (*PrintKVRequest_ValueString)(nil), (*PrintKVRequest_ValueInt)(nil), } } func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeStringBytes(x.ValueString) case *PrintKVRequest_ValueInt: b.EncodeVarint(3<<3 | proto.WireVarint) b.EncodeVarint(uint64(x.ValueInt)) case nil: default: return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x) } return nil } func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*PrintKVRequest) switch tag { case 2: // Value.ValueString if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Value = &PrintKVRequest_ValueString{x} return true, err case 3: // Value.ValueInt if wire != proto.WireVarint { return true, proto.ErrInternalBadWireType } x, err := b.DecodeVarint() m.Value = &PrintKVRequest_ValueInt{int32(x)} return true, err default: return false, nil } } func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.ValueString))) n += len(x.ValueString) case *PrintKVRequest_ValueInt: n += proto.SizeVarint(3<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.ValueInt)) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type PrintKVResponse struct { } func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} } func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) } func (*PrintKVResponse) ProtoMessage() {} func (*PrintKVResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func init() { proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest") 
proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse") proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest") proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Test service type TestClient interface { Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) } type testClient struct { cc *grpc.ClientConn } func NewTestClient(cc *grpc.ClientConn) TestClient { return &testClient{cc} } func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) { out := new(TestResponse) err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) { out := new(PrintKVResponse) err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil
} // Server API for Test service type TestServer interface { Double(context.Context, *TestRequest) (*TestResponse, error) PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error) } func RegisterTestServer(s *grpc.Server, srv TestServer) { s.RegisterService(&_Test_serviceDesc, srv) } func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TestRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServer).Double(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/Double", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).Double(ctx, req.(*TestRequest)) } return interceptor(ctx, in, info, handler) } func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PrintKVRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServer).PrintKV(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/PrintKV", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest)) } return interceptor(ctx, in, info, handler) } var _Test_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpctest.Test", HandlerType: (*TestServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Double", Handler: _Test_Double_Handler, }, { MethodName: "PrintKV", Handler: _Test_PrintKV_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "test.proto", } func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 240 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
random_line_split
test.pb.go
PrintKVRequest_Value `protobuf_oneof:"Value"` } func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} } func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) } func (*PrintKVRequest) ProtoMessage() {} func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } type isPrintKVRequest_Value interface { isPrintKVRequest_Value() } type PrintKVRequest_ValueString struct { ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"` } type PrintKVRequest_ValueInt struct { ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"` } func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {} func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {} func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value { if m != nil { return m.Value } return nil } func (m *PrintKVRequest) GetKey() string { if m != nil { return m.Key } return "" } func (m *PrintKVRequest) GetValueString() string { if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok { return x.ValueString } return "" } func (m *PrintKVRequest) GetValueInt() int32 { if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok { return x.ValueInt } return 0 } // XXX_OneofFuncs is for the internal use of the proto package. func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{ (*PrintKVRequest_ValueString)(nil), (*PrintKVRequest_ValueInt)(nil), } } func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeStringBytes(x.ValueString) case *PrintKVRequest_ValueInt: b.EncodeVarint(3<<3 | proto.WireVarint) b.EncodeVarint(uint64(x.ValueInt)) case nil: default: return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x) } return nil } func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*PrintKVRequest) switch tag { case 2: // Value.ValueString if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Value = &PrintKVRequest_ValueString{x} return true, err case 3: // Value.ValueInt if wire != proto.WireVarint { return true, proto.ErrInternalBadWireType } x, err := b.DecodeVarint() m.Value = &PrintKVRequest_ValueInt{int32(x)} return true, err default: return false, nil } } func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) { m := msg.(*PrintKVRequest) // Value switch x := m.Value.(type) { case *PrintKVRequest_ValueString: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.ValueString))) n += len(x.ValueString) case *PrintKVRequest_ValueInt: n += proto.SizeVarint(3<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.ValueInt)) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type PrintKVResponse struct { } func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} } func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) } func (*PrintKVResponse) ProtoMessage() {} func (*PrintKVResponse)
() ([]byte, []int) { return fileDescriptor0, []int{3} } func init() { proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest") proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse") proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest") proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Test service type TestClient interface { Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) } type testClient struct { cc *grpc.ClientConn } func NewTestClient(cc *grpc.ClientConn) TestClient { return &testClient{cc} } func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) { out := new(TestResponse) err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) { out := new(PrintKVResponse) err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for Test service type TestServer interface { Double(context.Context, *TestRequest) (*TestResponse, error) PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error) } func RegisterTestServer(s *grpc.Server, srv TestServer) { s.RegisterService(&_Test_serviceDesc, srv) } func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TestRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServer).Double(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/Double", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).Double(ctx, req.(*TestRequest)) } return interceptor(ctx, in, info, handler) } func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PrintKVRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServer).PrintKV(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpctest.Test/PrintKV", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest)) } return interceptor(ctx, in, info, handler) } var _Test_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpctest.Test", HandlerType: (*TestServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Double", Handler: _Test_Double_Handler, }, { MethodName: "PrintKV", Handler: _Test_PrintKV_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "test.proto", } func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 240 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 
0x2f, 0xc9, 0x17,
Descriptor
identifier_name
mesintiket-gen3.py
(QtCore.QThread): update = QtCore.pyqtSignal(str) def __init__(self, tosay): QtCore.QThread.__init__(self) self.tosay = tosay def __del__(self): self.wait() def run(self): subprocess.call('espeak -vid+f3 "%s"' % self.tosay, shell=True) #~ self.terminate() class MainApp(QtCore.QObject): def __init__(self): QtCore.QObject.__init__(self) self.context = zmq.Context() self.dblayer = self.context.socket(zmq.REQ) self.dblayer.connect("tcp://%s:%s" % (config.server_ip, config.server_port)) self.redis = redis.Redis('localhost') self.route = Route(config.route, config.destinations) # start new thread to listen to gps signal self.gpsThread = GpsListener(configusb.gpsusbport, config.gps_baudrate) self.gpsThread.message.connect(self.gpsReceived) self.gpsThread.sat_info.connect(self.sat_infoReceived) self.gpsThread.speed.connect(self.speedReceived) self.gpsThread.start() # start new thread to listen to gpio signal dests = [] for d in config.destinations[0]: dests.append(d) self.gpioThread = GPIOListener(dests) self.gpioThread.destinationPressed.connect(self.destinationChosen) self.gpioThread.printPressed.connect(self.printTicket) self.gpioThread.directionSwitched.connect(self.switchDirection) self.gpioThread.start() # Timer for sending position every 60 seconds to server self.sendGpsTimer = QtCore.QTimer(self) self.sendGpsTimer.timeout.connect(self.sendGpsPosition) self.sendGpsTimer.start(60000) # current state (Agent, destination, price) self.currentAgent = None self.currentDestination = None self.currentDistance = None self.currentLon = None self.currentLat = None # print init messages to printer p = PrinterC1(config.printer_port, 9600) p.selectFont1(2) p.printString('Mesin Tiket Bus') p.printString( config.company_name) p.printString( 'Bus: %s' % config.bus_plateno) p.cutPaper(0) p.close() # init LCD #GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers #GPIO.setup(LCD_E, GPIO.OUT) # E #GPIO.setup(LCD_E2, GPIO.OUT) # E2 #GPIO.setup(LCD_RS, GPIO.OUT) # RS #GPIO.setup(LCD_D4, GPIO.OUT) # DB4 #GPIO.setup(LCD_D5, GPIO.OUT) # DB5 #GPIO.setup(LCD_D6, GPIO.OUT) # DB6 #GPIO.setup(LCD_D7, GPIO.OUT) # DB7 #GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable # Initialise display lcd_init() lcd_string('Inisiasi sistem selesai..', 1, 1) self.updateRouteDisplay() logger.debug('init finished') self.say('Mesin tiket siap digunakan') def sendGpsPosition(self): #~ logger.debug(self.gpsThread.lastpos) try: if self.gpsThread.lastpos: # ITPRO861001000786141,11025.595867,-659.625256,31,20121008035615.000,15,0,13,1, gprmclon = 100 *(int(self.gpsThread.lastpos['lon']) + ((self.gpsThread.lastpos['lon'] - int(self.gpsThread.lastpos['lon'])) / 100 * 60)) gprmclat = 100 *(int(self.gpsThread.lastpos['lat']) + ((self.gpsThread.lastpos['lat'] - int(self.gpsThread.lastpos['lat'])) / 100 * 60)) gpsmsg = 'ITPRO%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\r\n' % ( config.bus_plateno, gprmclon, gprmclat, self.gpsThread.lastpos['alt'], time.strftime("%Y%m%d%H%M%S", time.gmtime(self.gpsThread.lastpos['gpsdt'])), self.gpsThread.lastpos['no_sat'], self.gpsThread.lastpos['speed'], self.gpsThread.lastpos['bearing'], 'A', self.gpsThread.lastpos['ext_power'], '', ) logger.debug('SENDGPSINFO: %s' % gpsmsg) self.redis.rpush('mq', gpsmsg) else: logger.info('SENDGPSINFO: GPS not set, not sending position to server..') except Exception: e = sys.exc_info() logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1])) def gpsReceived(self, gpsPos): #~ print newpos logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos))) if gpsPos['type'] == 0: if 
gpsPos['lon'] and gpsPos['lat']: #self.updateTrackPosition(gpsPos) #lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3) self.currentLon = gpsPos['lon'] self.currentLat = gpsPos['lat'] curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents) if self.currentAgent != curAgent: self.updateCurrentAgent(curAgent) #self.updateDestinations() if not curAgent: self.updateCurrentAgent('Di luar area') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') self.currentAgent = curAgent else: self.updateCurrentAgent('Belum mendapat sinyal GPS..') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') def updateCurrentAgent(self, newAgent): logger.debug('updateCurrentagent: %s' % newAgent) lcd_string('Agen: %s' % newAgent, 1, 1) def updateDestinationPriceDistance(self, dest, price, distance): if dest != '---': lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2) self.currentDestination = dest self.currentPrice = price self.currentDistance = distance else: lcd_string("Tujuan: --- Harga: ---", 1, 2) self.currentDestination = None self.currentPrice = None self.currentDistance = None def updateStatus(self, status, showTime): lcd_string('{0}'.format(status), 1, 3) self.say(status) QtCore.QTimer.singleShot(showTime, self.resetStatus) def resetStatus(self): lcd_string('', 1, 3) def speedReceived(self, speed): lcd_goto(('%s kmh' % speed).ljust(7),0,4) def sat_infoReceived(self, sat_info): lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4) def updateNoTicket(self, ticket_no): lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4) def updateRouteDisplay(self): dests = self.route.getDestinationNames() lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4) def getAgentInCurrentLocation(self, gpsPos, agents): #~ print curPos, agents for agent in agents: dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon'])) #~ print dist if dist <= (agents[agent]['radius'] / 1000.0): return agent return None def destinationChosen(self, dest_qstring): dest = str(dest_qstring) if dest: if self.currentAgent: # check if selected destination is valid if self.route.simpleDistanceTo(config.agents[self.currentAgent]['latlon'], normalized = True) < self.route.simpleDistanceTo(self.route.getDestinations()[dest]['latlon'], normalized = True): distance = self.route.distanceTo( config.agents[self.currentAgent]['latlon'], self.route.getDestinations()[dest]['latlon'] ) price = self.calculatePrice(self.currentAgent,
SpeechThread
identifier_name
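The sendGpsPosition method above converts a decimal-degree coordinate into the ddmm.mmmm style value expected by the ITPRO message (whole degrees times 100 plus decimal minutes). A minimal sketch of that arithmetic, written in Go for brevity; the helper name toNMEA and the sample coordinate are illustrative, not taken from the application:

package main

import "fmt"

// toNMEA mirrors the gprmclon/gprmclat expressions in sendGpsPosition:
// 100*(int(x) + ((x - int(x)) / 100 * 60)) == whole degrees * 100 + minutes.
func toNMEA(deg float64) float64 {
	whole := float64(int(deg))      // whole degrees, truncated toward zero
	minutes := (deg - whole) * 60.0 // fractional degrees expressed as minutes
	return whole*100.0 + minutes
}

func main() {
	// 110.4266 degrees east -> roughly 11025.596, in the shape of the sample message
	fmt.Printf("%.6f\n", toNMEA(110.4266))
}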
mesintiket-gen3.py
_power'], '', ) logger.debug('SENDGPSINFO: %s' % gpsmsg) self.redis.rpush('mq', gpsmsg) else: logger.info('SENDGPSINFO: GPS not set, not sending position to server..') except Exception: e = sys.exc_info() logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1])) def gpsReceived(self, gpsPos): #~ print newpos logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos))) if gpsPos['type'] == 0: if gpsPos['lon'] and gpsPos['lat']: #self.updateTrackPosition(gpsPos) #lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3) self.currentLon = gpsPos['lon'] self.currentLat = gpsPos['lat'] curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents) if self.currentAgent != curAgent: self.updateCurrentAgent(curAgent) #self.updateDestinations() if not curAgent: self.updateCurrentAgent('Di luar area') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') self.currentAgent = curAgent else: self.updateCurrentAgent('Belum mendapat sinyal GPS..') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') def updateCurrentAgent(self, newAgent): logger.debug('updateCurrentagent: %s' % newAgent) lcd_string('Agen: %s' % newAgent, 1, 1) def updateDestinationPriceDistance(self, dest, price, distance): if dest != '---': lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2) self.currentDestination = dest self.currentPrice = price self.currentDistance = distance else: lcd_string("Tujuan: --- Harga: ---", 1, 2) self.currentDestination = None self.currentPrice = None self.currentDistance = None def updateStatus(self, status, showTime): lcd_string('{0}'.format(status), 1, 3) self.say(status) QtCore.QTimer.singleShot(showTime, self.resetStatus) def resetStatus(self): lcd_string('', 1, 3) def speedReceived(self, speed): lcd_goto(('%s kmh' % speed).ljust(7),0,4) def sat_infoReceived(self, sat_info): lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4) def updateNoTicket(self, ticket_no): lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4) def updateRouteDisplay(self): dests = self.route.getDestinationNames() lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4) def getAgentInCurrentLocation(self, gpsPos, agents): #~ print curPos, agents for agent in agents: dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon'])) #~ print dist if dist <= (agents[agent]['radius'] / 1000.0): return agent return None def destinationChosen(self, dest_qstring): dest = str(dest_qstring) if dest: if self.currentAgent: # check if selected destination is valid if self.route.simpleDistanceTo(config.agents[self.currentAgent]['latlon'], normalized = True) < self.route.simpleDistanceTo(self.route.getDestinations()[dest]['latlon'], normalized = True): distance = self.route.distanceTo( config.agents[self.currentAgent]['latlon'], self.route.getDestinations()[dest]['latlon'] ) price = self.calculatePrice(self.currentAgent, dest, distance) self.updateDestinationPriceDistance(dest, price, distance) self.say(dest) else: self.updateDestinationPriceDistance('---', '---', '---') self.updateStatus('Tujuan tidak valid', 2000) else: self.updateStatus('Di luar agen', 2000) self.updateDestinationPriceDistance('---', '---', '---') else: self.updateDestinationPriceDistance('---', '---', '---') self.updateStatus('Error pemilihan tujuan', 2000) def say(self, tosay): subprocess.call('espeak -vid+f3 "%s" 2>/dev/null &' % tosay, shell=True) #~ speechThread = SpeechThread(tosay) #~ speechThread.start() pass def calculatePrice(self, 
fromAgent, destination, distance): print (fromAgent, destination, distance) for prices in self.route.getDestinations()[destination]['pricelist']: if fromAgent in prices['from']: return prices['price'] #~ return max(config.minimal_price, math.ceil((distance*config.price_per_km)/1000.0) * 1000) return 0 def printTicket(self): if self.currentAgent in config.agents: if self.currentDestination: # if any destination selected dest = self.currentDestination # print ticket #~ try: if config.printer_enabled: self.say('Mencetak tiket ke %s' % dest) gpsdt = self.gpsThread.lastpos['gpsdt'] curdt = datetime.fromtimestamp(gpsdt) # initialize or increment global ticket counter if not self.redis.get('discountTicketCounter'): self.redis.set('discountTicketCounter', 0) self.redis.incr('discountTicketCounter') # initialize or increment daily ticket counter if not self.redis.get(curdt.strftime('%Y%m%d:ticket_no')): self.redis.set(curdt.strftime('%Y%m%d:ticket_no'), 0) self.redis.incr(curdt.strftime('%Y%m%d:ticket_no')) isTicketFree = False if int(self.redis.get('discountTicketCounter')) >= 100: isTicketFree = True self.redis.set('discountTicketCounter', 0) p = PrinterC1(config.printer_port, 9600) p.selectFont1(2) if isTicketFree: p.printString(config.company_name) p.printString(dest, 2, 4) p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4) p.printString('GRATIS PROMO') p.selectFont1(0) p.printString( 'Agen: %s' % self.currentAgent) p.printString('{0:.1f} km'.format(self.currentDistance)) p.printString( 'Bus: %s' % config.bus_plateno) p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no'))) p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt)) else: p.printString(config.company_name) p.printString(dest, 2, 4) p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4) p.printString('Rp {0:,}'.format(self.currentPrice), 2, 4) p.selectFont1(0) p.printString( 'Agen: %s' % self.currentAgent) p.printString('{0:.1f} km'.format(self.currentDistance)) p.printString( 'Bus: %s' % config.bus_plateno) p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no'))) p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt)) p.cutPaper(0) p.close() #~ print 'PO. Sumber Alam' #~ print config.bus_plateno #~ print curdt.strftime('%d-%b-%Y %H:%M', time.localtime(curtime)) #~ print 'Tujuan: {}'.format(destListBox.selectedItem) #~ print 'Jarak: 0{:.1f} km'.format(distance) #~ print 'Harga: Rp. {0:.0f}'.format(self.ui.lblPrice.text()) #~ print '%03d%010d' % (config.bus_id, int(curtime)) # initialize or add daily total setoran if not self.redis.get(curdt.strftime('%Y%m%d:setoran')):
self.redis.set(curdt.strftime('%Y%m%d:setoran'), 0)
conditional_block
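calculatePrice above walks the destination's pricelist and returns the first fare whose 'from' list contains the boarding agent, falling back to 0. A hedged Go sketch of that lookup; the PriceEntry type and its field names are assumptions standing in for the config structures:

// PriceEntry is an assumed stand-in for one element of a destination's
// pricelist: the boarding agents it applies to and the fare to charge.
type PriceEntry struct {
	From  []string
	Price int
}

// lookupPrice returns the first matching fare, or 0 when the boarding agent
// is not listed - the same fallthrough behaviour as calculatePrice.
func lookupPrice(fromAgent string, pricelist []PriceEntry) int {
	for _, entry := range pricelist {
		for _, agent := range entry.From {
			if agent == fromAgent {
				return entry.Price
			}
		}
	}
	return 0
}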
mesintiket-gen3.py
import logging import logging.handlers logger = logging.getLogger('') logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s %(thread)d %(levelname)-5s %(message)s') fh = logging.handlers.RotatingFileHandler('log.txt', maxBytes=10000000, backupCount=5) fh.setFormatter(formatter) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(fh) logger.addHandler(ch) PRINTER_PORT = '/dev/ttyAMA0' class SpeechThread(QtCore.QThread): update = QtCore.pyqtSignal(str) def __init__(self, tosay): QtCore.QThread.__init__(self) self.tosay = tosay def __del__(self): self.wait() def run(self): subprocess.call('espeak -vid+f3 "%s"' % self.tosay, shell=True) #~ self.terminate() class MainApp(QtCore.QObject): def __init__(self): QtCore.QObject.__init__(self) self.context = zmq.Context() self.dblayer = self.context.socket(zmq.REQ) self.dblayer.connect("tcp://%s:%s" % (config.server_ip, config.server_port)) self.redis = redis.Redis('localhost') self.route = Route(config.route, config.destinations) # start new thread to listen to gps signal self.gpsThread = GpsListener(configusb.gpsusbport, config.gps_baudrate) self.gpsThread.message.connect(self.gpsReceived) self.gpsThread.sat_info.connect(self.sat_infoReceived) self.gpsThread.speed.connect(self.speedReceived) self.gpsThread.start() # start new thread to listen to gpio signal dests = [] for d in config.destinations[0]: dests.append(d) self.gpioThread = GPIOListener(dests) self.gpioThread.destinationPressed.connect(self.destinationChosen) self.gpioThread.printPressed.connect(self.printTicket) self.gpioThread.directionSwitched.connect(self.switchDirection) self.gpioThread.start() # Timer for sending position every 60 seconds to server self.sendGpsTimer = QtCore.QTimer(self) self.sendGpsTimer.timeout.connect(self.sendGpsPosition) self.sendGpsTimer.start(60000) # current state (Agent, destination, price) self.currentAgent = None self.currentDestination = None self.currentDistance = None self.currentLon = None self.currentLat = None # print init messages to printer p = PrinterC1(config.printer_port, 9600) p.selectFont1(2) p.printString('Mesin Tiket Bus') p.printString( config.company_name) p.printString( 'Bus: %s' % config.bus_plateno) p.cutPaper(0) p.close() # init LCD #GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers #GPIO.setup(LCD_E, GPIO.OUT) # E #GPIO.setup(LCD_E2, GPIO.OUT) # E2 #GPIO.setup(LCD_RS, GPIO.OUT) # RS #GPIO.setup(LCD_D4, GPIO.OUT) # DB4 #GPIO.setup(LCD_D5, GPIO.OUT) # DB5 #GPIO.setup(LCD_D6, GPIO.OUT) # DB6 #GPIO.setup(LCD_D7, GPIO.OUT) # DB7 #GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable # Initialise display lcd_init() lcd_string('Inisiasi sistem selesai..', 1, 1) self.updateRouteDisplay() logger.debug('init finished') self.say('Mesin tiket siap digunakan') def sendGpsPosition(self): #~ logger.debug(self.gpsThread.lastpos) try: if self.gpsThread.lastpos: # ITPRO861001000786141,11025.595867,-659.625256,31,20121008035615.000,15,0,13,1, gprmclon = 100 *(int(self.gpsThread.lastpos['lon']) + ((self.gpsThread.lastpos['lon'] - int(self.gpsThread.lastpos['lon'])) / 100 * 60)) gprmclat = 100 *(int(self.gpsThread.lastpos['lat']) + ((self.gpsThread.lastpos['lat'] - int(self.gpsThread.lastpos['lat'])) / 100 * 60)) gpsmsg = 'ITPRO%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\r\n' % ( config.bus_plateno, gprmclon, gprmclat, self.gpsThread.lastpos['alt'], time.strftime("%Y%m%d%H%M%S", time.gmtime(self.gpsThread.lastpos['gpsdt'])), self.gpsThread.lastpos['no_sat'], self.gpsThread.lastpos['speed'], self.gpsThread.lastpos['bearing'], 'A', 
self.gpsThread.lastpos['ext_power'], '', ) logger.debug('SENDGPSINFO: %s' % gpsmsg) self.redis.rpush('mq', gpsmsg) else: logger.info('SENDGPSINFO: GPS not set, not sending position to server..') except Exception: e = sys.exc_info() logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1])) def gpsReceived(self, gpsPos): #~ print newpos logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos))) if gpsPos['type'] == 0: if gpsPos['lon'] and gpsPos['lat']: #self.updateTrackPosition(gpsPos) #lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3) self.currentLon = gpsPos['lon'] self.currentLat = gpsPos['lat'] curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents) if self.currentAgent != curAgent: self.updateCurrentAgent(curAgent) #self.updateDestinations() if not curAgent: self.updateCurrentAgent('Di luar area') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') self.currentAgent = curAgent else: self.updateCurrentAgent('Belum mendapat sinyal GPS..') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') def updateCurrentAgent(self, newAgent): logger.debug('updateCurrentagent: %s' % newAgent) lcd_string('Agen: %s' % newAgent, 1, 1) def updateDestinationPriceDistance(self, dest, price, distance): if dest != '---': lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2) self.currentDestination = dest self.currentPrice = price self.currentDistance = distance else: lcd_string("Tujuan: --- Harga: ---", 1, 2) self.currentDestination = None self.currentPrice = None self.currentDistance = None def updateStatus(self, status, showTime): lcd_string('{0}'.format(status), 1, 3) self.say(status) QtCore.QTimer.singleShot(showTime, self.resetStatus) def resetStatus(self): lcd_string('', 1, 3) def speedReceived(self, speed): lcd_goto(('%s kmh' % speed).ljust(7),0,4) def sat_infoReceived(self, sat_info): lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4) def updateNoTicket(self, ticket_no): lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4) def updateRouteDisplay(self): dests = self.route.getDestinationNames() lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4) def getAgentInCurrentLocation(self, gpsPos, agents): #~ print curPos, agents for agent in agents: dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon'])) #~ print dist if dist <= (agents[agent]['radius'] / 10
from gpslistener import GpsListener
from gpiolistener import GPIOListener
from LCD40X4 import GPIO, lcd_init, lcd_goto, lcd_string
random_line_split
mesintiket-gen3.py
# E #GPIO.setup(LCD_E2, GPIO.OUT) # E2 #GPIO.setup(LCD_RS, GPIO.OUT) # RS #GPIO.setup(LCD_D4, GPIO.OUT) # DB4 #GPIO.setup(LCD_D5, GPIO.OUT) # DB5 #GPIO.setup(LCD_D6, GPIO.OUT) # DB6 #GPIO.setup(LCD_D7, GPIO.OUT) # DB7 #GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable # Initialise display lcd_init() lcd_string('Inisiasi sistem selesai..', 1, 1) self.updateRouteDisplay() logger.debug('init finished') self.say('Mesin tiket siap digunakan') def sendGpsPosition(self): #~ logger.debug(self.gpsThread.lastpos)
else: logger.info('SENDGPSINFO: GPS not set, not sending position to server..') except Exception: e = sys.exc_info() logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1])) def gpsReceived(self, gpsPos): #~ print newpos logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos))) if gpsPos['type'] == 0: if gpsPos['lon'] and gpsPos['lat']: #self.updateTrackPosition(gpsPos) #lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3) self.currentLon = gpsPos['lon'] self.currentLat = gpsPos['lat'] curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents) if self.currentAgent != curAgent: self.updateCurrentAgent(curAgent) #self.updateDestinations() if not curAgent: self.updateCurrentAgent('Di luar area') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') self.currentAgent = curAgent else: self.updateCurrentAgent('Belum mendapat sinyal GPS..') # reset price, distance self.updateDestinationPriceDistance('---', '---', '---') def updateCurrentAgent(self, newAgent): logger.debug('updateCurrentagent: %s' % newAgent) lcd_string('Agen: %s' % newAgent, 1, 1) def updateDestinationPriceDistance(self, dest, price, distance): if dest != '---': lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2) self.currentDestination = dest self.currentPrice = price self.currentDistance = distance else: lcd_string("Tujuan: --- Harga: ---", 1, 2) self.currentDestination = None self.currentPrice = None self.currentDistance = None def updateStatus(self, status, showTime): lcd_string('{0}'.format(status), 1, 3) self.say(status) QtCore.QTimer.singleShot(showTime, self.resetStatus) def resetStatus(self): lcd_string('', 1, 3) def speedReceived(self, speed): lcd_goto(('%s kmh' % speed).ljust(7),0,4) def sat_infoReceived(self, sat_info): lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4) def updateNoTicket(self, ticket_no): lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4) def updateRouteDisplay(self): dests = self.route.getDestinationNames() lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4) def getAgentInCurrentLocation(self, gpsPos, agents): #~ print curPos, agents for agent in agents: dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon'])) #~ print dist if dist <= (agents[agent]['radius'] / 1000.0): return agent return None def destinationChosen(self, dest_qstring): dest = str(dest_qstring) if dest: if self.currentAgent: # check if selected destination is valid if self.route.simpleDistanceTo(config.agents[self.currentAgent]['latlon'], normalized = True) < self.route.simpleDistanceTo(self.route.getDestinations()[dest]['latlon'], normalized = True): distance = self.route.distanceTo( config.agents[self.currentAgent]['latlon'], self.route.getDestinations()[dest]['latlon'] ) price = self.calculatePrice(self.currentAgent, dest, distance) self.updateDestinationPriceDistance(dest, price, distance) self.say(dest) else: self.updateDestinationPriceDistance('---', '---', '---') self.updateStatus('Tujuan tidak valid', 2000) else: self.updateStatus('Di luar agen', 2000) self.updateDestinationPriceDistance('---', '---', '---') else: self.updateDestinationPriceDistance('---', '---', '---') self.updateStatus('Error pemilihan tujuan', 2000) def say(self, tosay): subprocess.call('espeak -vid+f3 "%s" 2>/dev/null &' % tosay, shell=True) #~ speechThread = SpeechThread(tosay) #~ speechThread.start() pass def calculatePrice(self, fromAgent, destination, distance): print (fromAgent, destination, distance) for prices 
in self.route.getDestinations()[destination]['pricelist']: if fromAgent in prices['from']: return prices['price'] #~ return max(config.minimal_price, math.ceil((distance*config.price_per_km)/1000.0) * 1000) return 0 def printTicket(self): if self.currentAgent in config.agents: if self.currentDestination: # if any destination selected dest = self.currentDestination # print ticket #~ try: if config.printer_enabled: self.say('Mencetak tiket ke %s' % dest) gpsdt = self.gpsThread.lastpos['gpsdt'] curdt = datetime.fromtimestamp(gpsdt) # initialize or increment global ticket counter if not self.redis.get('discountTicketCounter'): self.redis.set('discountTicketCounter', 0) self.redis.incr('discountTicketCounter') # initialize or increment daily ticket counter if not self.redis.get(curdt.strftime('%Y%m%d:ticket_no')): self.redis.set(curdt.strftime('%Y%m%d:ticket_no'), 0) self.redis.incr(curdt.strftime('%Y%m%d:ticket_no')) isTicketFree = False if int(self.redis.get('discountTicketCounter')) >= 100: isTicketFree = True self.redis.set('discountTicketCounter', 0) p = PrinterC1(config.printer_port, 9600) p.selectFont1(2) if isTicketFree: p.printString(config.company_name) p
try: if self.gpsThread.lastpos: # ITPRO861001000786141,11025.595867,-659.625256,31,20121008035615.000,15,0,13,1, gprmclon = 100 *(int(self.gpsThread.lastpos['lon']) + ((self.gpsThread.lastpos['lon'] - int(self.gpsThread.lastpos['lon'])) / 100 * 60)) gprmclat = 100 *(int(self.gpsThread.lastpos['lat']) + ((self.gpsThread.lastpos['lat'] - int(self.gpsThread.lastpos['lat'])) / 100 * 60)) gpsmsg = 'ITPRO%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\r\n' % ( config.bus_plateno, gprmclon, gprmclat, self.gpsThread.lastpos['alt'], time.strftime("%Y%m%d%H%M%S", time.gmtime(self.gpsThread.lastpos['gpsdt'])), self.gpsThread.lastpos['no_sat'], self.gpsThread.lastpos['speed'], self.gpsThread.lastpos['bearing'], 'A', self.gpsThread.lastpos['ext_power'], '', ) logger.debug('SENDGPSINFO: %s' % gpsmsg) self.redis.rpush('mq', gpsmsg)
identifier_body
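printTicket above keeps two counters in Redis: a per-day ticket number and a global discountTicketCounter that marks every 100th ticket as a free promo ticket and then resets. A small Go sketch of that counting rule, using a plain map instead of Redis; the store type and function name are illustrative only:

// counterStore stands in for the Redis keys used by printTicket.
type counterStore map[string]int

// nextTicket increments the daily and promo counters and reports whether this
// ticket is the free one (every 100th ticket, after which the promo counter
// is reset), mirroring the isTicketFree logic above.
func nextTicket(c counterStore, day string) (ticketNo int, free bool) {
	c["discountTicketCounter"]++
	c[day+":ticket_no"]++
	if c["discountTicketCounter"] >= 100 {
		c["discountTicketCounter"] = 0
		free = true
	}
	return c[day+":ticket_no"], free
}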
lp.go
(waiting, leftID) } // TODO why only in this case? } else { leftID := tree.CreateLeftChild(nextID, first.Phi, false) waiting = append(waiting, leftID) } if second.Final { if len(second.Phi) != 0 { rightID := tree.CreateRightChild(nextID, second.Phi, true) waiting = append(waiting, rightID) } } else { rightID := tree.CreateRightChild(nextID, second.Phi, true) waiting = append(waiting, rightID) } } } func (tree *DNFTree) IsImplicant(mtp br.BooleanVector) bool { uID := 0 for k := 0; k < len(mtp); k++ { u := tree.Content[uID] if tree.IsLeaf(uID) { return true } leftChild, rightChild := u.leftChild, u.rightChild if mtp[k] { if leftChild >= 0 { uID = leftChild continue } else { if debug { if rightChild < 0 { panic("rightChild must not be nil in IsImplicant") } } uID = rightChild } } else { if rightChild >= 0 { uID = rightChild continue } else { return false } } } if debug { if !(tree.Content[uID].leftChild < 0 && tree.Content[uID].rightChild < 0) { panic("rightChild and leftChild must be nil in IsImplicant") } } return true } func (tree *DNFTree) IsRegular(mtps []br.BooleanVector) bool { numRuns := tree.Nbvar - 1 res := true // we will do this concurrently: // for each mtp iterate over all variable combinations and perform the test // and write the result to a channel // this also has some drawback: we need to wait for all mtps to finish // otherwise we would need some context wish would be too much here // so they all must write a result, even if one already returns false... report := make(chan bool, 10) // channel to report once we read all results done := make(chan bool) go func() { for i := 0; i < len(mtps); i++ { nxt := <-report if !nxt { res = false } } done <- true }() for k := 0; k < len(mtps); k++ { go func(index int) { mtp := mtps[index] check := true for i := 0; i < numRuns; i++ { if (!mtp[i]) && (mtp[i+1]) { // change the positions in the point, after the implicant test // we will change them again mtp[i] = true mtp[i+1] = false isImplicant := tree.IsImplicant(mtp) mtp[i] = false mtp[i+1] = true if !isImplicant { check = false break } } } report <- check }(k) } // wait until all results are there <-done return res } // TightenMode describes different modes to tighten the linear program // before solving it. // // There are three different modes described below. type TightenMode int const ( TightenNone TightenMode = iota // Add only constraings necessary for solving the problem TightenNeighbours // Add also constraings between variables x(i) and x(i + 1) TightenAll // Add additional constraints between all variable pairs ) type LinearProgram struct { Renaming, ReverseRenaming []int Tree *DNFTree Winder br.WinderMatrix LP *golp.LP MFPs, MTPs []br.BooleanVector Phi br.ClauseSet Nbvar int } // NewLinearProgram creates a new lp given the DNF ϕ. // // It will however not create the actual program or the tree, this must be done // somewhere else, it only creates the root node. // // Important note: For our algorithm to work the variables must be sorted // according to their importance. Since this is not always the case (only // during testing and some very special cases) this method will do this for // you, i.e. it will create the winder matrix and then rename all // variables accordingly. So the DNF we store in the root node is the // renamed DNF. But we also store the mapping that caused this renaming // in the field Renaming. This slice stores for each "old" variable // the id in the new tree, i.e. a lookup tree.Renaming[id] gives you the // id of the variable in the new tree. 
// The reverse mapping, i.e. new variable → old variable is stored in // ReverseRenaming. // // If you don't need the renaming set sortMatrix to false, in this case // the matrix will work properly but the variables don't get sorted. // That is only set it to false if you know that the ordering of the variables // is already correct. // Renaming and ReverseRenaming will be set to nil in this case. // // Also the clauses in the DNF must be sorted in increasing order. // If you don't want the clauses to get sorted set sortClauses to false. // Of course this only makes sense if also sortMatrix is set to false, // otherwise the new dnf might not be sorted. // This functions will sort them in this case nonetheless. // // The variables in the DNF have to be 0 <= v < nbar (so nbvar must be correct // and variables start with 0). // Also each variable should appear at least once in the DNF, what happens // otherwise is not tested yet. func NewLinearProgram(phi br.ClauseSet, nbvar int, sortMatrix, sortClauses bool) *LinearProgram { tree := NewDNFTree(nbvar) newDNF, winder, renaming, reverseRenaming := InitLP(phi, nbvar, sortMatrix) if sortMatrix || sortClauses { newDNF.SortAll() } dnfType := isFinal(newDNF) rootID := tree.CreateRoot(newDNF, dnfType != NotFinal) if debug { if rootID != 0 { panic("Expected root id to be 0, in NewLinearProgram") } } return &LinearProgram{Renaming: renaming, ReverseRenaming: reverseRenaming, Tree: tree, Winder: winder, LP: nil, MFPs: nil, MTPs: nil, Phi: newDNF, Nbvar: nbvar, } } // InitLP initializes the lp, that is it creates the Winder matrix for // (the renamed) ϕ. // It will also compute Renaming and ReverseRenaming as discussed in // NewLinearProgram. // // It returns first the renamedDNF, the Winder matrix, then Renaming and then // ReverseRenaming. // If sortMatrix is false the old dnf will be returned. func InitLP(phi br.ClauseSet, nbvar int, sortMatrix bool) (br.ClauseSet, br.WinderMatrix, []int, []int) { newDNF := phi var renaming, reverseRenaming []int = nil, nil winder := br.NewWinderMatrix(phi, nbvar, true) if sortMatrix { renaming = make([]int, nbvar) reverseRenaming = make([]int, nbvar) // sort the matrix winder.Sort() // create the renaming for newVariableId, row := range winder {
ewDNF = make([]br.Clause, len(phi)) // clone each clause // we'll do that concurrently var wg sync.WaitGroup wg.Add(len(phi)) for i := 0; i < len(phi); i++ { go func(index int) { clause := phi[index] var newClause br.Clause = make([]int, len(clause)) for j, oldID := range clause { newClause[j] = renaming[oldID] } newDNF[index] = newClause wg.Done() }(i) } wg.Wait() } return newDNF, winder, renaming, reverseRenaming } func (lp LinearProgram) Solve(tighten TightenMode, regTest bool) (*LPB, error) { // create minimal true points mtps := ComputeMTPs(lp.Phi, lp.Nbvar) lp.MTPs = mtps // if regularity test should be beformed create the DNF tree if
renaming[row[len(row)-1]] = newVariableId
			reverseRenaming[newVariableId] = row[len(row)-1]
		}
		n
conditional_block
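InitLP above sorts the Winder matrix, derives Renaming and ReverseRenaming from the sorted rows, and then rewrites every clause through the renaming. A simplified, sequential Go sketch of that clause rewrite; the real code does it concurrently with a sync.WaitGroup, and plain [][]int stands in for br.ClauseSet here:

// renameClauses applies an old-id -> new-id variable renaming to every clause
// of the DNF, the same per-clause rewrite InitLP performs after sorting.
func renameClauses(phi [][]int, renaming []int) [][]int {
	out := make([][]int, len(phi))
	for i, clause := range phi {
		newClause := make([]int, len(clause))
		for j, oldID := range clause {
			newClause[j] = renaming[oldID]
		}
		out[i] = newClause
	}
	return out
}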
lp.go
:= len(p1) for k := 0; k < size; k++ { val1, val2 := p1[k], p2[k] if (!val1) && val2 { return true } else if val1 && (!val2) { return false } } if debug { panic("Must not reach this state in ComputeMFPs") } return false } sort.Slice(mtps, cmp) } // compute nu, we do this concurrently var wg sync.WaitGroup wg.Add(len(mtps) - 1) nu := make([]int, len(mtps)) for i := 1; i < len(mtps); i++ { go func(index int) { vars := len(mtps[index]) for j := 0; j < vars; j++ { val1 := mtps[index-1][j] val2 := mtps[index][j] if (!val1) && val2 { nu[index] = j + 1 break } } wg.Done() }(i) } wg.Wait() // create the actual points, again we do that concurrently and communicate // via a channel // we range over that channel so we must not forget to close it! res := make([]br.BooleanVector, 0, 10) // start a function that listens on the channel and adds all points to the // result // we use a done channel to signal when all points have been added resChan := make(chan br.BooleanVector, 10) done := make(chan bool) go func() { for point := range resChan { res = append(res, point) } done <- true }() // in the wait group we wait until for all i we've added all points // after all points were written to the channel we close the channel and then // wait until they have been added to result wg.Add(len(mtps)) for i := 0; i < len(mtps); i++ { go func(index int) { point := mtps[index] vars := len(point) for j := nu[index]; j < vars; j++ { if point[j] { if debug { if nu[index] > j { panic("nu[i] must be <= j in ComputeMFPs") } } newPoint := point.Clone() newPoint[j] = false for k := j + 1; k < vars; k++ { newPoint[k] = true } resChan <- newPoint } } wg.Done() }(i) } wg.Wait() close(resChan) // now wait until all points were added to res <-done return res } // FormulateLP will formulate the linear program to solve. // It will set the following constraings: // 1. All variables must be of type int (note that this is really bad for // the runtime of lpsolve) // 2. For each minimal true point (a1, ..., ak) where ai are the variables // that are true a constraint that says that the sum of // all variables must be ≥ the degree // that is we transform the problem a bit and get: // a1 + ... + ak ≥ d ⇔ a1 + ... + ak -d ≥ 0 // 3. For each maximal false point (a1, ..., ak) where are ai are the variables // that are true a constraint that says that the sum of all variables // must be < the degree: // a1 + ... + ak < d // because lpsolve only allows ≤ we transform this to // a1 + ... + ak ≤ d - 1 ⇔ a1 + ... + ak - ≤ -1 // // The additional constraints depend on the mode: // If mode is TightenNeighbours we compare all variables w(i) and w(i+1). // We know that it must always hold that w(i) ≥ w(i+1), but it could also // be w(i) = w(i+1), we find that out by comparing the Winder matrix entries. // So we have w(i) ≥ w(i+1) ⇔ w(i) - w(i+1) >= 0 or w(i) - w(i+1) = 0. 
// TODO we can make this easily concurrent func FormulateLP(mtps, mfps []br.BooleanVector, nbvar int, winder br.WinderMatrix, tighten TightenMode) (*golp.LP, error) { // go uses zero based ids, so all variables have ids between 0 and nbvar -1 // the degree has id nbvar degreeID := nbvar lp := golp.NewLP(0, nbvar+1) // set int constraing on all variables for column := 0; column < nbvar+1; column++ { lp.SetInt(column, true) } for _, mtp := range mtps { // now add the constraint row := make([]golp.Entry, 0, nbvar+1) for j, val := range mtp { if val { row = append(row, golp.Entry{Col: j, Val: 1}) } } // add -d row = append(row, golp.Entry{Col: degreeID, Val: -1}) // add the row if err := lp.AddConstraintSparse(row, golp.GE, 0); err != nil { return nil, err } } for _, mfp := range mfps { row := make([]golp.Entry, 0, nbvar+1) for j, val := range mfp { if val { row = append(row, golp.Entry{Col: j, Val: 1}) } } // add -d row = append(row, golp.Entry{Col: degreeID, Val: -1}) if err := lp.AddConstraintSparse(row, golp.LE, -1); err != nil { return nil, err } } // now we add additional constraints, depending on the mode switch { case tighten == TightenNeighbours: // add a constraint for neighbouring variables // we already know that w(i) ≥ w(i+1), but we could already conclude // that they must be equal entry1 := golp.Entry{Col: -1, Val: 1} entry2 := golp.Entry{Col: -1, Val: -1} for i := 1; i < nbvar; i++ { // compare both rows compRes := br.CompareMatrixEntry(winder[i-1], winder[i]) var constraint golp.ConstraintType = golp.GE if debug { if compRes < 0 { panic("Unsorted Winder matrix in FormulateLP") } } if compRes == 0 { constraint = golp.EQ } // now update the row and add the constraint entry1.Col = i - 1 entry2.Col = i if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, constraint, 0); err != nil { return nil, err } } case tighten == TightenAll && nbvar > 0: // first we will compare each entry i with i+1 and save the comparison // result between i and i + 1 // we make use of the transitivity of the comparison and later we don't // have to compare matrix rows again. // To add all pairs between i and j where i < j we simply have to lookup // the precomputed results: as long as the comparison result is = the // variables must be equal, after that only ≥ // TODO would be nice if someone checked this... really confusing // with all this index stuff ;) precomputed := make([]int, nbvar-1) for i := 1; i < nbvar; i++ { compRes := br.CompareMatrixEntry(winder[i-1], winder[i]) if debug { if compRes < 0 { panic("Unsorted Winder matrix in FormulateLP") } } precomputed[i-1] = compRes } entry1 := golp.Entry{Col: -1, Val: 1} entry2 := golp.Entry{Col: -1, Val: -1} // now add all variable pair results for i := 0; i < nbvar; i++ { entry1.Col = i // first find the longest sequence s.t. the variables are equal j := i + 1 // loop as long as j is equivalent to its predecessor // as long as this is the case i is equal to j for ; j < nbvar && precomputed[j-1] == 0; j++ {
// add eq constraint
				entry2.Col = j
random_line_split
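FormulateLP above encodes each minimal true point as sum(true vars) - d >= 0 and each maximal false point as sum(true vars) - d <= -1, with the degree d as an extra column. A hedged Go sketch that builds the same rows as dense slices instead of golp sparse entries, just to make the transformation explicit; the constraint type is an assumption, not part of the package:

// constraint is a dense row over the variables plus a trailing degree column,
// compared against a right-hand side - a stand-in for a sparse golp row.
type constraint struct {
	coeffs []float64 // one coefficient per variable, plus the degree column
	op     string    // ">=" or "<="
	rhs    float64
}

// buildRows turns minimal true points and maximal false points into the
// inequalities described above.
func buildRows(mtps, mfps [][]bool, nbvar int) []constraint {
	row := func(p []bool) []float64 {
		c := make([]float64, nbvar+1)
		for j, v := range p {
			if v {
				c[j] = 1
			}
		}
		c[nbvar] = -1 // the -d term
		return c
	}
	var rows []constraint
	for _, mtp := range mtps {
		rows = append(rows, constraint{row(mtp), ">=", 0})
	}
	for _, mfp := range mfps {
		rows = append(rows, constraint{row(mfp), "<=", -1})
	}
	return rows
}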
lp.go
(waiting, leftID) } // TODO why only in this case? } else { leftID := tree.CreateLeftChild(nextID, first.Phi, false) waiting = append(waiting, leftID) } if second.Final { if len(second.Phi) != 0 { rightID := tree.CreateRightChild(nextID, second.Phi, true) waiting = append(waiting, rightID) } } else { rightID := tree.CreateRightChild(nextID, second.Phi, true) waiting = append(waiting, rightID) } } } func (tree *DNFTree) IsImplicant(mtp br.BooleanVector) bool { uID := 0 for k := 0; k < len(mtp); k++ { u := tree.Content[uID] if tree.IsLeaf(uID) { return true } leftChild, rightChild := u.leftChild, u.rightChild if mtp[k] { if leftChild >= 0 { uID = leftChild continue } else { if debug { if rightChild < 0 { panic("rightChild must not be nil in IsImplicant") } } uID = rightChild } } else { if rightChild >= 0 { uID = rightChild continue } else { return false } } } if debug { if !(tree.Content[uID].leftChild < 0 && tree.Content[uID].rightChild < 0) { panic("rightChild and leftChild must be nil in IsImplicant") } } return true } func (tree *DNFTree) IsRegular(mtps []br.BooleanVector) bool
}() for k := 0; k < len(mtps); k++ { go func(index int) { mtp := mtps[index] check := true for i := 0; i < numRuns; i++ { if (!mtp[i]) && (mtp[i+1]) { // change the positions in the point, after the implicant test // we will change them again mtp[i] = true mtp[i+1] = false isImplicant := tree.IsImplicant(mtp) mtp[i] = false mtp[i+1] = true if !isImplicant { check = false break } } } report <- check }(k) } // wait until all results are there <-done return res } // TightenMode describes different modes to tighten the linear program // before solving it. // // There are three different modes described below. type TightenMode int const ( TightenNone TightenMode = iota // Add only constraings necessary for solving the problem TightenNeighbours // Add also constraings between variables x(i) and x(i + 1) TightenAll // Add additional constraints between all variable pairs ) type LinearProgram struct { Renaming, ReverseRenaming []int Tree *DNFTree Winder br.WinderMatrix LP *golp.LP MFPs, MTPs []br.BooleanVector Phi br.ClauseSet Nbvar int } // NewLinearProgram creates a new lp given the DNF ϕ. // // It will however not create the actual program or the tree, this must be done // somewhere else, it only creates the root node. // // Important note: For our algorithm to work the variables must be sorted // according to their importance. Since this is not always the case (only // during testing and some very special cases) this method will do this for // you, i.e. it will create the winder matrix and then rename all // variables accordingly. So the DNF we store in the root node is the // renamed DNF. But we also store the mapping that caused this renaming // in the field Renaming. This slice stores for each "old" variable // the id in the new tree, i.e. a lookup tree.Renaming[id] gives you the // id of the variable in the new tree. // The reverse mapping, i.e. new variable → old variable is stored in // ReverseRenaming. // // If you don't need the renaming set sortMatrix to false, in this case // the matrix will work properly but the variables don't get sorted. // That is only set it to false if you know that the ordering of the variables // is already correct. // Renaming and ReverseRenaming will be set to nil in this case. // // Also the clauses in the DNF must be sorted in increasing order. // If you don't want the clauses to get sorted set sortClauses to false. // Of course this only makes sense if also sortMatrix is set to false, // otherwise the new dnf might not be sorted. // This functions will sort them in this case nonetheless. // // The variables in the DNF have to be 0 <= v < nbar (so nbvar must be correct // and variables start with 0). // Also each variable should appear at least once in the DNF, what happens // otherwise is not tested yet. func NewLinearProgram(phi br.ClauseSet, nbvar int, sortMatrix, sortClauses bool) *LinearProgram { tree := NewDNFTree(nbvar) newDNF, winder, renaming, reverseRenaming := InitLP(phi, nbvar, sortMatrix) if sortMatrix || sortClauses { newDNF.SortAll() } dnfType := isFinal(newDNF) rootID := tree.CreateRoot(newDNF, dnfType != NotFinal) if debug { if rootID != 0 { panic("Expected root id to be 0, in NewLinearProgram") } } return &LinearProgram{Renaming: renaming, ReverseRenaming: reverseRenaming, Tree: tree, Winder: winder, LP: nil, MFPs: nil, MTPs: nil, Phi: newDNF, Nbvar: nbvar, } } // InitLP initializes the lp, that is it creates the Winder matrix for // (the renamed) ϕ. 
// It will also compute Renaming and ReverseRenaming as discussed in // NewLinearProgram. // // It returns first the renamedDNF, the Winder matrix, then Renaming and then // ReverseRenaming. // If sortMatrix is false the old dnf will be returned. func InitLP(phi br.ClauseSet, nbvar int, sortMatrix bool) (br.ClauseSet, br.WinderMatrix, []int, []int) { newDNF := phi var renaming, reverseRenaming []int = nil, nil winder := br.NewWinderMatrix(phi, nbvar, true) if sortMatrix { renaming = make([]int, nbvar) reverseRenaming = make([]int, nbvar) // sort the matrix winder.Sort() // create the renaming for newVariableId, row := range winder { renaming[row[len(row)-1]] = newVariableId reverseRenaming[newVariableId] = row[len(row)-1] } newDNF = make([]br.Clause, len(phi)) // clone each clause // we'll do that concurrently var wg sync.WaitGroup wg.Add(len(phi)) for i := 0; i < len(phi); i++ { go func(index int) { clause := phi[index] var newClause br.Clause = make([]int, len(clause)) for j, oldID := range clause { newClause[j] = renaming[oldID] } newDNF[index] = newClause wg.Done() }(i) } wg.Wait() } return newDNF, winder, renaming, reverseRenaming } func (lp LinearProgram) Solve(tighten TightenMode, regTest bool) (*LPB, error) { // create minimal true points mtps := ComputeMTPs(lp.Phi, lp.Nbvar) lp.MTPs = mtps // if regularity test should be beformed create the DNF tree if reg
{
	numRuns := tree.Nbvar - 1
	res := true
	// we will do this concurrently:
	// for each mtp iterate over all variable combinations and perform the test
	// and write the result to a channel
	// this also has some drawback: we need to wait for all mtps to finish
	// otherwise we would need some context which would be too much here
	// so they all must write a result, even if one already returns false...
	report := make(chan bool, 10)
	// channel to report once we read all results
	done := make(chan bool)
	go func() {
		for i := 0; i < len(mtps); i++ {
			nxt := <-report
			if !nxt {
				res = false
			}
		}
		done <- true
identifier_body
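IsRegular above fans the per-MTP checks out to goroutines that each report exactly once on a channel, while a single collector folds the reports and then signals completion over a done channel; this is why a worker still reports even after another check has already failed. A stripped-down Go sketch of that fan-out/fan-in shape, with an arbitrary placeholder workload in place of the implicant test:

// collectAll runs every check concurrently and returns false if any check
// fails; the collector reads exactly one report per worker before signalling
// done, the same pattern IsRegular uses.
func collectAll(checks []func() bool) bool {
	report := make(chan bool, 10)
	done := make(chan bool)
	res := true

	go func() {
		for range checks {
			if ok := <-report; !ok {
				res = false
			}
		}
		done <- true
	}()

	for _, c := range checks {
		go func(f func() bool) { report <- f() }(c)
	}

	<-done
	return res
}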
lp.go
); k++ { u := tree.Content[uID] if tree.IsLeaf(uID) { return true } leftChild, rightChild := u.leftChild, u.rightChild if mtp[k] { if leftChild >= 0 { uID = leftChild continue } else { if debug { if rightChild < 0 { panic("rightChild must not be nil in IsImplicant") } } uID = rightChild } } else { if rightChild >= 0 { uID = rightChild continue } else { return false } } } if debug { if !(tree.Content[uID].leftChild < 0 && tree.Content[uID].rightChild < 0) { panic("rightChild and leftChild must be nil in IsImplicant") } } return true } func (tree *DNFTree) IsRegular(mtps []br.BooleanVector) bool { numRuns := tree.Nbvar - 1 res := true // we will do this concurrently: // for each mtp iterate over all variable combinations and perform the test // and write the result to a channel // this also has some drawback: we need to wait for all mtps to finish // otherwise we would need some context wish would be too much here // so they all must write a result, even if one already returns false... report := make(chan bool, 10) // channel to report once we read all results done := make(chan bool) go func() { for i := 0; i < len(mtps); i++ { nxt := <-report if !nxt { res = false } } done <- true }() for k := 0; k < len(mtps); k++ { go func(index int) { mtp := mtps[index] check := true for i := 0; i < numRuns; i++ { if (!mtp[i]) && (mtp[i+1]) { // change the positions in the point, after the implicant test // we will change them again mtp[i] = true mtp[i+1] = false isImplicant := tree.IsImplicant(mtp) mtp[i] = false mtp[i+1] = true if !isImplicant { check = false break } } } report <- check }(k) } // wait until all results are there <-done return res } // TightenMode describes different modes to tighten the linear program // before solving it. // // There are three different modes described below. type TightenMode int const ( TightenNone TightenMode = iota // Add only constraings necessary for solving the problem TightenNeighbours // Add also constraings between variables x(i) and x(i + 1) TightenAll // Add additional constraints between all variable pairs ) type LinearProgram struct { Renaming, ReverseRenaming []int Tree *DNFTree Winder br.WinderMatrix LP *golp.LP MFPs, MTPs []br.BooleanVector Phi br.ClauseSet Nbvar int } // NewLinearProgram creates a new lp given the DNF ϕ. // // It will however not create the actual program or the tree, this must be done // somewhere else, it only creates the root node. // // Important note: For our algorithm to work the variables must be sorted // according to their importance. Since this is not always the case (only // during testing and some very special cases) this method will do this for // you, i.e. it will create the winder matrix and then rename all // variables accordingly. So the DNF we store in the root node is the // renamed DNF. But we also store the mapping that caused this renaming // in the field Renaming. This slice stores for each "old" variable // the id in the new tree, i.e. a lookup tree.Renaming[id] gives you the // id of the variable in the new tree. // The reverse mapping, i.e. new variable → old variable is stored in // ReverseRenaming. // // If you don't need the renaming set sortMatrix to false, in this case // the matrix will work properly but the variables don't get sorted. // That is only set it to false if you know that the ordering of the variables // is already correct. // Renaming and ReverseRenaming will be set to nil in this case. // // Also the clauses in the DNF must be sorted in increasing order. 
// If you don't want the clauses to get sorted set sortClauses to false. // Of course this only makes sense if also sortMatrix is set to false, // otherwise the new dnf might not be sorted. // This functions will sort them in this case nonetheless. // // The variables in the DNF have to be 0 <= v < nbar (so nbvar must be correct // and variables start with 0). // Also each variable should appear at least once in the DNF, what happens // otherwise is not tested yet. func NewLinearProgram(phi br.ClauseSet, nbvar int, sortMatrix, sortClauses bool) *LinearProgram { tree := NewDNFTree(nbvar) newDNF, winder, renaming, reverseRenaming := InitLP(phi, nbvar, sortMatrix) if sortMatrix || sortClauses { newDNF.SortAll() } dnfType := isFinal(newDNF) rootID := tree.CreateRoot(newDNF, dnfType != NotFinal) if debug { if rootID != 0 { panic("Expected root id to be 0, in NewLinearProgram") } } return &LinearProgram{Renaming: renaming, ReverseRenaming: reverseRenaming, Tree: tree, Winder: winder, LP: nil, MFPs: nil, MTPs: nil, Phi: newDNF, Nbvar: nbvar, } } // InitLP initializes the lp, that is it creates the Winder matrix for // (the renamed) ϕ. // It will also compute Renaming and ReverseRenaming as discussed in // NewLinearProgram. // // It returns first the renamedDNF, the Winder matrix, then Renaming and then // ReverseRenaming. // If sortMatrix is false the old dnf will be returned. func InitLP(phi br.ClauseSet, nbvar int, sortMatrix bool) (br.ClauseSet, br.WinderMatrix, []int, []int) { newDNF := phi var renaming, reverseRenaming []int = nil, nil winder := br.NewWinderMatrix(phi, nbvar, true) if sortMatrix { renaming = make([]int, nbvar) reverseRenaming = make([]int, nbvar) // sort the matrix winder.Sort() // create the renaming for newVariableId, row := range winder { renaming[row[len(row)-1]] = newVariableId reverseRenaming[newVariableId] = row[len(row)-1] } newDNF = make([]br.Clause, len(phi)) // clone each clause // we'll do that concurrently var wg sync.WaitGroup wg.Add(len(phi)) for i := 0; i < len(phi); i++ { go func(index int) { clause := phi[index] var newClause br.Clause = make([]int, len(clause)) for j, oldID := range clause { newClause[j] = renaming[oldID] } newDNF[index] = newClause wg.Done() }(i) } wg.Wait() } return newDNF, winder, renaming, reverseRenaming } func (lp LinearProgram) Solve(tighten TightenMode, regTest bool) (*LPB, error) { // create minimal true points mtps := ComputeMTPs(lp.Phi, lp.Nbvar) lp.MTPs = mtps // if regularity test should be beformed create the DNF tree if regTest { lp.Tree.BuildTree() if !lp.Tree.IsRegular(mtps) { return nil, errors.New("DNF is not regular") } } // compute maximal false points mfps := ComputeMFPs(mtps, true) lp.MFPs = mfps // setup the linear program program, setupErr := FormulateLP(mtps, mfps, lp.Nbvar, lp.Winder, tighten) if setupErr != nil { return nil, setupErr } lp.LP = program // try to convert it return SolveLP(program) } // ComputeMTPs computes the set of minimal true points of a minimal ϕ. // Since ϕ is minimal this is easy: Each clause defines exactly one minimal // true point. func Comput
eMTPs(phi b
identifier_name
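The ComputeMTPs doc comment above states the rule it relies on: for a minimal DNF, every clause defines exactly one minimal true point. A possible sketch of that rule in Go, using plain slices in place of br.ClauseSet and br.BooleanVector; this illustrates the stated rule and is not the package's actual implementation:

// computeMTPs builds, for each clause, the boolean vector that is true on
// exactly the clause's variables and false everywhere else.
func computeMTPs(phi [][]int, nbvar int) [][]bool {
	mtps := make([][]bool, 0, len(phi))
	for _, clause := range phi {
		point := make([]bool, nbvar)
		for _, v := range clause {
			point[v] = true
		}
		mtps = append(mtps, point)
	}
	return mtps
}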
nexus_label.rs
pub guid: GptGuid, /// lba of where to find the partition table pub lba_table: u64, /// number of partitions, most tools set this to 128 pub num_entries: u32, /// Size of element pub entry_size: u32, /// CRC32 checksum of the partition array. pub table_crc: u32, } impl GPTHeader { /// converts a slice into a gpt header and verifies the validity of the data pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> { let mut reader = Cursor::new(slice); let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap(); if gpt.header_size != 92 || gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54] || gpt.revision != [0x00, 0x00, 0x01, 0x00] { return Err(Error::Invalid); } let crc = gpt.self_checksum; gpt.self_checksum = 0; gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap()); if gpt.self_checksum != crc { info!("GPT label crc mismatch"); return Err(Error::Invalid); } if gpt.lba_self > gpt.lba_alt { std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt) } Ok(gpt) } /// checksum the header with the checksum field itself set 0 pub fn checksum(&mut self) -> u32 { self.self_checksum = 0; self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap()); self.self_checksum } pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self { let fields = guid.as_fields(); GPTHeader { signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54], revision: [0x00, 0x00, 0x01, 0x00], header_size: 92, self_checksum: 0, reserved: [0; 4], lba_self: 1, lba_alt: num_blocks - 1, lba_start: u64::from((1 << 20) / blk_size), lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1, guid: GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, }, lba_table: 2, num_entries: 2, entry_size: 128, table_crc: 0, } } pub fn to_backup(&self) -> Self { let mut secondary = *self; secondary.lba_self = self.lba_alt; secondary.lba_alt = self.lba_self; secondary.lba_table = self.lba_end + 1; secondary } } #[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)] pub struct GptEntry { /// GUID type, some of them are assigned/reserved for example to Linux pub ent_type: GptGuid, /// entry GUID, can be anything typically random pub ent_guid: GptGuid, /// start lba for this entry pub ent_start: u64, /// end lba for this entry pub ent_end: u64, /// entry attributes, according to do the docs bit 0 MUST be zero pub ent_attr: u64, /// utf16 name of the partition entry, do not confuse this fs labels! pub ent_name: GptName, } impl GptEntry { /// converts a slice into a partition array pub fn from_slice( slice: &[u8], parts: u32, ) -> Result<Vec<GptEntry>, Error> { let mut reader = Cursor::new(slice); let mut part_vec = Vec::new(); // TODO 128 should be passed in as a argument for _ in 0 .. parts { part_vec.push(deserialize_from(&mut reader)?); } Ok(part_vec) } /// calculate the checksum over the partitions table pub fn checksum(parts: &[GptEntry]) -> u32 { let mut digest = crc32::Digest::new(crc32::IEEE); for p in parts { digest.write(&serialize(p).unwrap()); } digest.sum32() } } #[derive(Debug, PartialEq, Serialize, Clone)] /// The nexus label is standard GPT label (such that you can use it without us /// in the data path) The only thing that is really specific to us is the /// ent_type GUID if we see that attached to a partition, we assume the data in /// that partition is ours. In the data we will have more magic markers to /// confirm the assumption but this is step one. 
pub struct NexusLabel { /// the main GPT header pub primary: GPTHeader, /// Vector of GPT entries where the first element is considered to be ours pub partitions: Vec<GptEntry>, } impl NexusLabel { /// returns the offset to the first data segment pub(crate) fn offset(&self) -> u64 { self.partitions[1].ent_start } /// returns the number of total blocks in this segment pub(crate) fn num_blocks(&self) -> u64 { self.partitions[1].ent_end - self.partitions[1].ent_start } } impl Display for NexusLabel { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "GUID: {}", self.primary.guid.to_string())?; writeln!(f, "\tHeader crc32 {}", self.primary.self_checksum)?; writeln!(f, "\tPartition table crc32 {}", self.primary.table_crc)?; for i in 0 .. self.partitions.len() { writeln!(f, "\tPartition number {}", i)?; writeln!(f, "\tGUID: {}", self.partitions[i].ent_guid.to_string())?; writeln!( f, "\tType GUID: {}", self.partitions[i].ent_type.to_string() )?; writeln!( f, "\tLogical block start: {}, end: {}", self.partitions[i].ent_start, self.partitions[i].ent_end )?; } Ok(()) } } // for arrays bigger then 32 elements, things start to get unimplemented // in terms of derive and what not. So we create a struct with a string // and tell serde how to use it during (de)serializing struct GpEntryNameVisitor; impl<'de> Deserialize<'de> for GptName { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor) } } impl Serialize for GptName { fn serialize<S>( &self, serializer: S, ) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { // we cant use serialize_type_struct here as we want exactly 72 bytes let mut s = serializer.serialize_tuple(36)?; let mut out: Vec<u16> = vec![0; 36]; for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) { *o = i; } out.iter().for_each(|e| s.serialize_element(&e).unwrap()); s.end() } } impl<'de> Visitor<'de> for GpEntryNameVisitor { type Value = GptName; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("Invalid GPT partition name") } fn visit_seq<A>(self, mut seq: A) -> std::result::Result<GptName, A::Error> where A: SeqAccess<'de>, { le
t mut out = Vec::new();
        let mut end = false;

        loop {
            match seq.next_element()? {
                Some(0) => {
                    end = true;
                }
                Some(e) if !end => out.push(e),
                _ => break,
            }
        }

        if end {
            Ok(GptName {
                name: String::from_utf16_lossy(&out),
            })
        } else {
            Err(serde::de::Error::invalid_value(Unexpected::Seq, &self))
        }
    }
identifier_body
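GPTHeader::checksum above follows the GPT convention that the header CRC32 (IEEE) is computed over the serialized header with the checksum field itself zeroed. The same idea sketched in Go with the standard hash/crc32 package; treating the header as raw bytes and passing the field offset in are simplifications made for the sketch:

package main

import "hash/crc32"

// headerChecksum computes the CRC32 of a serialized header after blanking the
// 4-byte checksum field, mirroring GPTHeader::checksum and from_slice's check.
func headerChecksum(header []byte, crcOffset int) uint32 {
	tmp := make([]byte, len(header))
	copy(tmp, header)
	for i := 0; i < 4; i++ {
		tmp[crcOffset+i] = 0 // zero the stored checksum before hashing
	}
	return crc32.ChecksumIEEE(tmp)
}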
nexus_label.rs
4.0 MiB FFFF MayaMeta //! 2 10240 2097118 1019.0 MiB FFFF MayaData //! ``` //! //! Notice how two partitions have been created when accessing the disk //! when shared by the nexus: //! //! ```bash //! $ mctl share gpt //! "/dev/nbd0" //! //! TODO: also note how it complains about a MBR //! //! $ lsblk //! NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT //! sda 8:0 0 50G 0 disk //! ├─sda1 8:1 0 41.5G 0 part / //! ├─sda2 8:2 0 7M 0 part [SWAP] //! └─sda3 8:3 0 511M 0 part /boot //! sr0 11:0 1 1024M 0 rom //! nbd0 43:0 0 1019M 0 disk //! nvme0n1 259:0 0 200G 0 disk /code //! //! The nbd0 zero device does not show the partitions //! ``` use crate::bdev::nexus::Error; use bincode::{deserialize_from, serialize}; use crc::{crc32, Hasher32}; use serde::{ de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor}, ser::{Serialize, SerializeTuple, Serializer}, }; use std::{ fmt::{self, Display}, io::Cursor, }; use uuid::{self, parser}; #[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)] /// based on RFC4122 pub struct GptGuid { pub time_low: u32, pub time_mid: u16, pub time_high: u16, pub node: [u8; 8], } impl std::str::FromStr for GptGuid { type Err = parser::ParseError; fn from_str(uuid: &str) -> Result<Self, Self::Err> { let fields = uuid::Uuid::from_str(uuid)?; let fields = fields.as_fields(); Ok(GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, }) } } impl std::fmt::Display for GptGuid { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{}", uuid::Uuid::from_fields( self.time_low, self.time_mid, self.time_high, &self.node, ) .unwrap()
impl GptGuid { pub(crate) fn new_random() -> Self { let fields = uuid::Uuid::new_v4(); let fields = fields.as_fields(); GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, } } } #[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)] pub struct GPTHeader { /// GPT signature (must be "EFI PART"). pub signature: [u8; 8], /// 00 00 01 00 up til version 2.17 pub revision: [u8; 4], /// GPT header size (92 bytes) pub header_size: u32, /// CRC32 of the header. pub self_checksum: u32, pub reserved: [u8; 4], /// primary lba where the header is located pub lba_self: u64, /// alternative lba where the header is located (backup) pub lba_alt: u64, /// first usable lba pub lba_start: u64, /// last usable lba pub lba_end: u64, /// 16 bytes representing the GUID of the GPT. pub guid: GptGuid, /// lba of where to find the partition table pub lba_table: u64, /// number of partitions, most tools set this to 128 pub num_entries: u32, /// Size of element pub entry_size: u32, /// CRC32 checksum of the partition array. pub table_crc: u32, } impl GPTHeader { /// converts a slice into a gpt header and verifies the validity of the data pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> { let mut reader = Cursor::new(slice); let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap(); if gpt.header_size != 92 || gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54] || gpt.revision != [0x00, 0x00, 0x01, 0x00] { return Err(Error::Invalid); } let crc = gpt.self_checksum; gpt.self_checksum = 0; gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap()); if gpt.self_checksum != crc { info!("GPT label crc mismatch"); return Err(Error::Invalid); } if gpt.lba_self > gpt.lba_alt { std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt) } Ok(gpt) } /// checksum the header with the checksum field itself set 0 pub fn checksum(&mut self) -> u32 { self.self_checksum = 0; self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap()); self.self_checksum } pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self { let fields = guid.as_fields(); GPTHeader { signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54], revision: [0x00, 0x00, 0x01, 0x00], header_size: 92, self_checksum: 0, reserved: [0; 4], lba_self: 1, lba_alt: num_blocks - 1, lba_start: u64::from((1 << 20) / blk_size), lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1, guid: GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, }, lba_table: 2, num_entries: 2, entry_size: 128, table_crc: 0, } } pub fn to_backup(&self) -> Self { let mut secondary = *self; secondary.lba_self = self.lba_alt; secondary.lba_alt = self.lba_self; secondary.lba_table = self.lba_end + 1; secondary } } #[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)] pub struct GptEntry { /// GUID type, some of them are assigned/reserved for example to Linux pub ent_type: GptGuid, /// entry GUID, can be anything typically random pub ent_guid: GptGuid, /// start lba for this entry pub ent_start: u64, /// end lba for this entry pub ent_end: u64, /// entry attributes, according to do the docs bit 0 MUST be zero pub ent_attr: u64, /// utf16 name of the partition entry, do not confuse this fs labels! 
pub ent_name: GptName, } impl GptEntry { /// converts a slice into a partition array pub fn from_slice( slice: &[u8], parts: u32, ) -> Result<Vec<GptEntry>, Error> { let mut reader = Cursor::new(slice); let mut part_vec = Vec::new(); // TODO 128 should be passed in as a argument for _ in 0 .. parts { part_vec.push(deserialize_from(&mut reader)?); } Ok(part_vec) } /// calculate the checksum over the partitions table pub fn checksum(parts: &[GptEntry]) -> u32 { let
.to_string() ) } }
random_line_split
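GptGuid above mirrors the RFC 4122 field split (time_low, time_mid, time_high plus eight clock/node bytes) that uuid::Uuid::as_fields returns, and FromStr/Display round-trip it through the canonical string form. A rough standalone Go equivalent of splitting a canonical GUID string into those fields; the function name and the sample GUID are illustrative only:

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"strings"
)

// splitGUIDFields decodes a canonical 8-4-4-4-12 UUID string and returns the
// same pieces the GptGuid struct stores: time_low, time_mid, time_high and
// the remaining 8 bytes (clock sequence + node).
func splitGUIDFields(s string) (uint32, uint16, uint16, [8]byte, error) {
	var node [8]byte
	raw, err := hex.DecodeString(strings.ReplaceAll(s, "-", ""))
	if err != nil || len(raw) != 16 {
		return 0, 0, 0, node, fmt.Errorf("invalid GUID %q", s)
	}
	// The canonical string writes each field in big-endian hex.
	timeLow := binary.BigEndian.Uint32(raw[0:4])
	timeMid := binary.BigEndian.Uint16(raw[4:6])
	timeHigh := binary.BigEndian.Uint16(raw[6:8])
	copy(node[:], raw[8:16])
	return timeLow, timeMid, timeHigh, node, nil
}

func main() {
	fmt.Println(splitGUIDFields("21686148-6449-6e6f-744e-656564454649"))
}
```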
nexus_label.rs
0 200G 0 disk /code //! //! The nbd0 zero device does not show the partitions //! ``` use crate::bdev::nexus::Error; use bincode::{deserialize_from, serialize}; use crc::{crc32, Hasher32}; use serde::{ de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor}, ser::{Serialize, SerializeTuple, Serializer}, }; use std::{ fmt::{self, Display}, io::Cursor, }; use uuid::{self, parser}; #[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)] /// based on RFC4122 pub struct GptGuid { pub time_low: u32, pub time_mid: u16, pub time_high: u16, pub node: [u8; 8], } impl std::str::FromStr for GptGuid { type Err = parser::ParseError; fn from_str(uuid: &str) -> Result<Self, Self::Err> { let fields = uuid::Uuid::from_str(uuid)?; let fields = fields.as_fields(); Ok(GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, }) } } impl std::fmt::Display for GptGuid { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{}", uuid::Uuid::from_fields( self.time_low, self.time_mid, self.time_high, &self.node, ) .unwrap() .to_string() ) } } impl GptGuid { pub(crate) fn new_random() -> Self { let fields = uuid::Uuid::new_v4(); let fields = fields.as_fields(); GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, } } } #[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)] pub struct GPTHeader { /// GPT signature (must be "EFI PART"). pub signature: [u8; 8], /// 00 00 01 00 up til version 2.17 pub revision: [u8; 4], /// GPT header size (92 bytes) pub header_size: u32, /// CRC32 of the header. pub self_checksum: u32, pub reserved: [u8; 4], /// primary lba where the header is located pub lba_self: u64, /// alternative lba where the header is located (backup) pub lba_alt: u64, /// first usable lba pub lba_start: u64, /// last usable lba pub lba_end: u64, /// 16 bytes representing the GUID of the GPT. pub guid: GptGuid, /// lba of where to find the partition table pub lba_table: u64, /// number of partitions, most tools set this to 128 pub num_entries: u32, /// Size of element pub entry_size: u32, /// CRC32 checksum of the partition array. 
pub table_crc: u32, } impl GPTHeader { /// converts a slice into a gpt header and verifies the validity of the data pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> { let mut reader = Cursor::new(slice); let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap(); if gpt.header_size != 92 || gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54] || gpt.revision != [0x00, 0x00, 0x01, 0x00] { return Err(Error::Invalid); } let crc = gpt.self_checksum; gpt.self_checksum = 0; gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap()); if gpt.self_checksum != crc { info!("GPT label crc mismatch"); return Err(Error::Invalid); } if gpt.lba_self > gpt.lba_alt { std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt) } Ok(gpt) } /// checksum the header with the checksum field itself set 0 pub fn checksum(&mut self) -> u32 { self.self_checksum = 0; self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap()); self.self_checksum } pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self { let fields = guid.as_fields(); GPTHeader { signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54], revision: [0x00, 0x00, 0x01, 0x00], header_size: 92, self_checksum: 0, reserved: [0; 4], lba_self: 1, lba_alt: num_blocks - 1, lba_start: u64::from((1 << 20) / blk_size), lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1, guid: GptGuid { time_low: fields.0, time_mid: fields.1, time_high: fields.2, node: *fields.3, }, lba_table: 2, num_entries: 2, entry_size: 128, table_crc: 0, } } pub fn to_backup(&self) -> Self { let mut secondary = *self; secondary.lba_self = self.lba_alt; secondary.lba_alt = self.lba_self; secondary.lba_table = self.lba_end + 1; secondary } } #[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)] pub struct GptEntry { /// GUID type, some of them are assigned/reserved for example to Linux pub ent_type: GptGuid, /// entry GUID, can be anything typically random pub ent_guid: GptGuid, /// start lba for this entry pub ent_start: u64, /// end lba for this entry pub ent_end: u64, /// entry attributes, according to do the docs bit 0 MUST be zero pub ent_attr: u64, /// utf16 name of the partition entry, do not confuse this fs labels! pub ent_name: GptName, } impl GptEntry { /// converts a slice into a partition array pub fn from_slice( slice: &[u8], parts: u32, ) -> Result<Vec<GptEntry>, Error> { let mut reader = Cursor::new(slice); let mut part_vec = Vec::new(); // TODO 128 should be passed in as a argument for _ in 0 .. parts { part_vec.push(deserialize_from(&mut reader)?); } Ok(part_vec) } /// calculate the checksum over the partitions table pub fn checksum(parts: &[GptEntry]) -> u32 { let mut digest = crc32::Digest::new(crc32::IEEE); for p in parts { digest.write(&serialize(p).unwrap()); } digest.sum32() } } #[derive(Debug, PartialEq, Serialize, Clone)] /// The nexus label is standard GPT label (such that you can use it without us /// in the data path) The only thing that is really specific to us is the /// ent_type GUID if we see that attached to a partition, we assume the data in /// that partition is ours. In the data we will have more magic markers to /// confirm the assumption but this is step one. 
pub struct NexusLabel { /// the main GPT header pub primary: GPTHeader, /// Vector of GPT entries where the first element is considered to be ours pub partitions: Vec<GptEntry>, } impl NexusLabel { /// returns the offset to the first data segment pub(crate) fn offset(&self) -> u64 { self.partitions[1].ent_start } /// returns the number of total blocks in this segment pub(crate) fn num_blocks(&self) -> u64 { self.partitions[1].ent_end - self.partitions[1].ent_start } } impl Display for NexusLabel { fn fmt(&self, f
: &
identifier_name
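from_slice() above validates a header by reading the stored self_checksum, zeroing that field, recomputing CRC32 (IEEE) over the 92-byte header and comparing. A small standalone Go sketch of that check against a raw header buffer; the byte offset 16 for the checksum follows the field order of the struct above, and the function itself is illustrative:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// verifyHeaderCRC checks a raw 92-byte GPT header the same way from_slice()
// does: read the stored checksum (offset 16), zero that field, recompute
// CRC32/IEEE over the header and compare.
func verifyHeaderCRC(raw []byte) (bool, error) {
	if len(raw) < 92 {
		return false, fmt.Errorf("header too short: %d bytes", len(raw))
	}
	hdr := make([]byte, 92)
	copy(hdr, raw[:92])
	stored := binary.LittleEndian.Uint32(hdr[16:20])
	for i := 16; i < 20; i++ {
		hdr[i] = 0
	}
	return crc32.ChecksumIEEE(hdr) == stored, nil
}

func main() {
	fmt.Println(verifyHeaderCRC(make([]byte, 92))) // all-zero header: false
}
```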
jira-api.service.ts
jql: searchQuery, }, }, cfg, }); } getIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssue> { return this._getIssueById$(issueId, cfg, true); } getReducedIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssueReduced> { return this._getIssueById$(issueId, cfg, false); } getCurrentUser$(cfg: JiraCfg, isForce: boolean = false): Observable<JiraOriginalUser> { return this._sendRequest$({ jiraReqCfg: { pathname: `myself`, transform: mapResponse, }, cfg, isForce, }); } listStatus$(cfg: JiraCfg): Observable<JiraOriginalStatus[]> { return this._sendRequest$({ jiraReqCfg: { pathname: `status`, transform: mapResponse, }, cfg, }); } getTransitionsForIssue$( issueId: string, cfg: JiraCfg, ): Observable<JiraOriginalTransition[]> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/transitions`, method: 'GET', query: { expand: 'transitions.fields', }, transform: mapTransitionResponse, }, cfg, }); } transitionIssue$(issueId: string, transitionId: string, cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/transitions`, method: 'POST', body: { transition: { id: transitionId, }, }, transform: mapResponse, }, cfg, }); } updateAssignee$(issueId: string, accountId: string, cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/assignee`, method: 'PUT', body: { accountId, }, }, cfg, }); } addWorklog$({ issueId, started, timeSpent, comment, cfg, }: { issueId: string; started: string; timeSpent: number; comment: string; cfg: JiraCfg; }): Observable<any> { const worklog = { started: moment(started).locale('en').format(JIRA_DATETIME_FORMAT), timeSpentSeconds: Math.floor(timeSpent / 1000), comment, }; return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/worklog`, method: 'POST', body: worklog, transform: mapResponse, }, cfg, }); } private _getIssueById$( issueId: string, cfg: JiraCfg, isGetChangelog: boolean = false, ): Observable<JiraIssue> { return this._sendRequest$({ jiraReqCfg: { transform: mapIssueResponse as (res: any, cfg?: JiraCfg) => any, pathname: `issue/${issueId}`, query: { expand: isGetChangelog ? ['changelog', 'description'] : ['description'], }, }, cfg, }); } // Complex Functions // -------- private _isMinimalSettings(settings: JiraCfg): boolean { return !!( settings && settings.host && settings.userName && settings.password && (IS_ELECTRON || this._isExtension) ); } private _sendRequest$({ jiraReqCfg, cfg, isForce = false, }: { jiraReqCfg: JiraRequestCfg; cfg: JiraCfg; isForce?: boolean; }): Observable<any> { return this._isInterfacesReadyIfNeeded$.pipe( take(1), concatMap(() => IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true), ), concatMap(() => { // assign uuid to request to know which responsive belongs to which promise const requestId = `${jiraReqCfg.pathname}__${ jiraReqCfg.method || 'GET' }__${nanoid()}`; if (!isOnline()) { this._snackService.open({ type: 'CUSTOM', msg: T.G.NO_CON, ico: 'cloud_off', }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId }); } if (!this._isMinimalSettings(cfg)) { this._snackService.open({ type: 'ERROR', msg: !IS_ELECTRON && !this._isExtension ? 
T.F.JIRA.S.EXTENSION_NOT_LOADED : T.F.JIRA.S.INSUFFICIENT_SETTINGS, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId, }); } if (this._isBlockAccess && !isForce) { console.error('Blocked Jira Access to prevent being shut out'); this._bannerService.open({ id: BannerId.JiraUnblock, msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG, svgIco: 'jira', action: { label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK, fn: () => this.unblockAccess(), }, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Blocked access to prevent being shut out ' + requestId, }); } // BUILD REQUEST START // ------------------- const requestInit = this._makeRequestInit(jiraReqCfg, cfg); const queryStr = jiraReqCfg.query ? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}` : ''; const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`; const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim(); return this._sendRequestToExecutor$( requestId, url, requestInit, jiraReqCfg.transform, cfg, ); // NOTE: offline is sexier & easier than cache, but in case we change our mind... // const args = [requestId, url, requestInit, jiraReqCfg.transform]; // return this._issueCacheService.cache(url, requestInit, this._sendRequestToExecutor$.bind(this), args); }), ); } private _sendRequestToExecutor$( requestId: string, url: string, requestInit: RequestInit, transform: any, jiraCfg: JiraCfg, ): Observable<any> { // TODO refactor to observable for request canceling etc let promiseResolve; let promiseReject; const promise = new Promise((resolve, reject) => { promiseResolve = resolve; promiseReject = reject; }); // save to request log (also sets up timeout) this._requestsLog[requestId] = this._makeJiraRequestLogItem({ promiseResolve, promiseReject, requestId, requestInit, transform, jiraCfg, }); const requestToSend = { requestId, requestInit, url }; if (this._electronService.isElectronApp) { (this._electronService.ipcRenderer as typeof ipcRenderer).send( IPC.JIRA_MAKE_REQUEST_EVENT, { ...requestToSend, jiraCfg, }, ); } else if (this._isExtension) { this._chromeExtensionInterfaceService.dispatchEvent( 'SP_JIRA_REQUEST', requestToSend, ); } this._globalProgressBarService.countUp(url); return fromPromise(promise).pipe( catchError((err) => { console.log(err); console.log(getErrorTxt(err)); const errTxt = `Jira: ${getErrorTxt(err)}`; this._snackService.open({ type: 'ERROR', msg: errTxt }); return throwError({ [HANDLED_ERROR_PROP_STR]: errTxt }); }), first(), finalize(() => this._globalProgressBarService.countDown()), ); } private _makeRequestInit(jr: JiraRequestCfg, cfg: JiraCfg): RequestInit { return { method: jr.method || 'GET', ...(jr.body ? { body: JSON.stringify(jr.body) } : {}), headers: { 'Content-Type': 'application/json', ...(IS_ELECTRON && cfg.isWonkyCookieMode ? { Cookie: sessionStorage.getItem(SS.JIRA_WONKY_COOKIE) as string, } : cfg.usePAT ? { Cookie: '', authorization: `Bearer ${cfg.password}`, } : { Cookie: '', authorization: `Basic ${this._b64EncodeUnicode( `${cfg.userName}:${cfg.password}`, )}`, }), }, }; } private async _checkSetWonkyCookie(cfg: JiraCfg): Promise<string | null> { const ssVal = sessionStorage.getItem(SS.JIRA_WONKY_COOKIE); if (ssVal && ssVal.length > 0)
{ return ssVal; }
conditional_block
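_makeRequestInit() above chooses between three authorization modes: a pre-captured session cookie ("wonky cookie" mode), a Bearer personal access token, or Basic auth built from userName:password. A rough standalone Go sketch of that header selection; the struct and function names are invented for illustration and do not correspond to the app's actual code path:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

// jiraAuth mirrors the three cases in _makeRequestInit: cookie mode wins,
// then a personal access token as a Bearer header, otherwise Basic auth.
type jiraAuth struct {
	WonkyCookie string // pre-captured session cookie, if any
	UsePAT      bool
	UserName    string
	Password    string // PAT or password, depending on UsePAT
}

func applyAuth(req *http.Request, a jiraAuth) {
	req.Header.Set("Content-Type", "application/json")
	switch {
	case a.WonkyCookie != "":
		req.Header.Set("Cookie", a.WonkyCookie)
	case a.UsePAT:
		req.Header.Set("Authorization", "Bearer "+a.Password)
	default:
		cred := base64.StdEncoding.EncodeToString([]byte(a.UserName + ":" + a.Password))
		req.Header.Set("Authorization", "Basic "+cred)
	}
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.atlassian.net/rest/api/2/myself", nil)
	applyAuth(req, jiraAuth{UserName: "me", Password: "secret"})
	fmt.Println(req.Header.Get("Authorization"))
}
```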
jira-api.service.ts
InterfacesReadyIfNeeded$: Observable<boolean> = IS_ELECTRON ? of(true).pipe() : this._chromeExtensionInterfaceService.onReady$.pipe(mapTo(true), shareReplay(1)); constructor( private _chromeExtensionInterfaceService: ChromeExtensionInterfaceService, private _electronService: ElectronService, private _globalProgressBarService: GlobalProgressBarService, private _snackService: SnackService, private _bannerService: BannerService, private _matDialog: MatDialog, ) { // set up callback listener for electron if (IS_ELECTRON) { (this._electronService.ipcRenderer as typeof ipcRenderer).on( IPC.JIRA_CB_EVENT, (ev: IpcRendererEvent, res: any) => { this._handleResponse(res); }, ); } this._chromeExtensionInterfaceService.onReady$.subscribe(() => { this._isExtension = true; this._chromeExtensionInterfaceService.addEventListener( 'SP_JIRA_RESPONSE', (ev: unknown, data: any) => { this._handleResponse(data); }, ); }); }
(): void { this._isBlockAccess = false; sessionStorage.removeItem(BLOCK_ACCESS_KEY); } issuePicker$(searchTerm: string, cfg: JiraCfg): Observable<SearchResultItem[]> { const searchStr = `${searchTerm}`; return this._sendRequest$({ jiraReqCfg: { pathname: 'issue/picker', followAllRedirects: true, query: { showSubTasks: true, showSubTaskParent: true, query: searchStr, currentJQL: cfg.searchJqlQuery, }, transform: mapToSearchResults, // NOTE: we pass the cfg as well to avoid race conditions }, cfg, }); } listFields$(cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: 'field', }, cfg, }); } findAutoImportIssues$( cfg: JiraCfg, isFetchAdditional?: boolean, maxResults: number = JIRA_MAX_RESULTS, ): Observable<JiraIssueReduced[]> { const options = { maxResults, fields: [ ...JIRA_ADDITIONAL_ISSUE_FIELDS, ...(cfg.storyPointFieldId ? [cfg.storyPointFieldId] : []), ], }; const searchQuery = cfg.autoAddBacklogJqlQuery; if (!searchQuery) { this._snackService.open({ type: 'ERROR', msg: T.F.JIRA.S.NO_AUTO_IMPORT_JQL, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'JiraApi: No search query for auto import', }); } return this._sendRequest$({ jiraReqCfg: { transform: mapIssuesResponse as (res: any, cfg?: JiraCfg) => any, pathname: 'search', method: 'POST', body: { ...options, jql: searchQuery, }, }, cfg, }); } getIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssue> { return this._getIssueById$(issueId, cfg, true); } getReducedIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssueReduced> { return this._getIssueById$(issueId, cfg, false); } getCurrentUser$(cfg: JiraCfg, isForce: boolean = false): Observable<JiraOriginalUser> { return this._sendRequest$({ jiraReqCfg: { pathname: `myself`, transform: mapResponse, }, cfg, isForce, }); } listStatus$(cfg: JiraCfg): Observable<JiraOriginalStatus[]> { return this._sendRequest$({ jiraReqCfg: { pathname: `status`, transform: mapResponse, }, cfg, }); } getTransitionsForIssue$( issueId: string, cfg: JiraCfg, ): Observable<JiraOriginalTransition[]> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/transitions`, method: 'GET', query: { expand: 'transitions.fields', }, transform: mapTransitionResponse, }, cfg, }); } transitionIssue$(issueId: string, transitionId: string, cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/transitions`, method: 'POST', body: { transition: { id: transitionId, }, }, transform: mapResponse, }, cfg, }); } updateAssignee$(issueId: string, accountId: string, cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/assignee`, method: 'PUT', body: { accountId, }, }, cfg, }); } addWorklog$({ issueId, started, timeSpent, comment, cfg, }: { issueId: string; started: string; timeSpent: number; comment: string; cfg: JiraCfg; }): Observable<any> { const worklog = { started: moment(started).locale('en').format(JIRA_DATETIME_FORMAT), timeSpentSeconds: Math.floor(timeSpent / 1000), comment, }; return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/worklog`, method: 'POST', body: worklog, transform: mapResponse, }, cfg, }); } private _getIssueById$( issueId: string, cfg: JiraCfg, isGetChangelog: boolean = false, ): Observable<JiraIssue> { return this._sendRequest$({ jiraReqCfg: { transform: mapIssueResponse as (res: any, cfg?: JiraCfg) => any, pathname: `issue/${issueId}`, query: { expand: isGetChangelog ? 
['changelog', 'description'] : ['description'], }, }, cfg, }); } // Complex Functions // -------- private _isMinimalSettings(settings: JiraCfg): boolean { return !!( settings && settings.host && settings.userName && settings.password && (IS_ELECTRON || this._isExtension) ); } private _sendRequest$({ jiraReqCfg, cfg, isForce = false, }: { jiraReqCfg: JiraRequestCfg; cfg: JiraCfg; isForce?: boolean; }): Observable<any> { return this._isInterfacesReadyIfNeeded$.pipe( take(1), concatMap(() => IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true), ), concatMap(() => { // assign uuid to request to know which responsive belongs to which promise const requestId = `${jiraReqCfg.pathname}__${ jiraReqCfg.method || 'GET' }__${nanoid()}`; if (!isOnline()) { this._snackService.open({ type: 'CUSTOM', msg: T.G.NO_CON, ico: 'cloud_off', }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId }); } if (!this._isMinimalSettings(cfg)) { this._snackService.open({ type: 'ERROR', msg: !IS_ELECTRON && !this._isExtension ? T.F.JIRA.S.EXTENSION_NOT_LOADED : T.F.JIRA.S.INSUFFICIENT_SETTINGS, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId, }); } if (this._isBlockAccess && !isForce) { console.error('Blocked Jira Access to prevent being shut out'); this._bannerService.open({ id: BannerId.JiraUnblock, msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG, svgIco: 'jira', action: { label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK, fn: () => this.unblockAccess(), }, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Blocked access to prevent being shut out ' + requestId, }); } // BUILD REQUEST START // ------------------- const requestInit = this._makeRequestInit(jiraReqCfg, cfg); const queryStr = jiraReqCfg.query ? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}` : ''; const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`; const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim(); return this._sendRequestToExecutor$( requestId, url, requestInit,
unblockAccess
identifier_name
jira-api.service.ts
ReadyIfNeeded$: Observable<boolean> = IS_ELECTRON ? of(true).pipe() : this._chromeExtensionInterfaceService.onReady$.pipe(mapTo(true), shareReplay(1)); constructor( private _chromeExtensionInterfaceService: ChromeExtensionInterfaceService, private _electronService: ElectronService, private _globalProgressBarService: GlobalProgressBarService, private _snackService: SnackService, private _bannerService: BannerService, private _matDialog: MatDialog, ) { // set up callback listener for electron if (IS_ELECTRON) { (this._electronService.ipcRenderer as typeof ipcRenderer).on( IPC.JIRA_CB_EVENT, (ev: IpcRendererEvent, res: any) => { this._handleResponse(res); }, ); } this._chromeExtensionInterfaceService.onReady$.subscribe(() => { this._isExtension = true; this._chromeExtensionInterfaceService.addEventListener( 'SP_JIRA_RESPONSE', (ev: unknown, data: any) => { this._handleResponse(data); }, ); }); } unblockAccess(): void { this._isBlockAccess = false; sessionStorage.removeItem(BLOCK_ACCESS_KEY); } issuePicker$(searchTerm: string, cfg: JiraCfg): Observable<SearchResultItem[]> { const searchStr = `${searchTerm}`; return this._sendRequest$({ jiraReqCfg: { pathname: 'issue/picker', followAllRedirects: true, query: { showSubTasks: true, showSubTaskParent: true, query: searchStr, currentJQL: cfg.searchJqlQuery, }, transform: mapToSearchResults, // NOTE: we pass the cfg as well to avoid race conditions }, cfg, }); } listFields$(cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: 'field', }, cfg, }); } findAutoImportIssues$( cfg: JiraCfg, isFetchAdditional?: boolean, maxResults: number = JIRA_MAX_RESULTS, ): Observable<JiraIssueReduced[]>
return this._sendRequest$({ jiraReqCfg: { transform: mapIssuesResponse as (res: any, cfg?: JiraCfg) => any, pathname: 'search', method: 'POST', body: { ...options, jql: searchQuery, }, }, cfg, }); } getIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssue> { return this._getIssueById$(issueId, cfg, true); } getReducedIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssueReduced> { return this._getIssueById$(issueId, cfg, false); } getCurrentUser$(cfg: JiraCfg, isForce: boolean = false): Observable<JiraOriginalUser> { return this._sendRequest$({ jiraReqCfg: { pathname: `myself`, transform: mapResponse, }, cfg, isForce, }); } listStatus$(cfg: JiraCfg): Observable<JiraOriginalStatus[]> { return this._sendRequest$({ jiraReqCfg: { pathname: `status`, transform: mapResponse, }, cfg, }); } getTransitionsForIssue$( issueId: string, cfg: JiraCfg, ): Observable<JiraOriginalTransition[]> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/transitions`, method: 'GET', query: { expand: 'transitions.fields', }, transform: mapTransitionResponse, }, cfg, }); } transitionIssue$(issueId: string, transitionId: string, cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/transitions`, method: 'POST', body: { transition: { id: transitionId, }, }, transform: mapResponse, }, cfg, }); } updateAssignee$(issueId: string, accountId: string, cfg: JiraCfg): Observable<any> { return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/assignee`, method: 'PUT', body: { accountId, }, }, cfg, }); } addWorklog$({ issueId, started, timeSpent, comment, cfg, }: { issueId: string; started: string; timeSpent: number; comment: string; cfg: JiraCfg; }): Observable<any> { const worklog = { started: moment(started).locale('en').format(JIRA_DATETIME_FORMAT), timeSpentSeconds: Math.floor(timeSpent / 1000), comment, }; return this._sendRequest$({ jiraReqCfg: { pathname: `issue/${issueId}/worklog`, method: 'POST', body: worklog, transform: mapResponse, }, cfg, }); } private _getIssueById$( issueId: string, cfg: JiraCfg, isGetChangelog: boolean = false, ): Observable<JiraIssue> { return this._sendRequest$({ jiraReqCfg: { transform: mapIssueResponse as (res: any, cfg?: JiraCfg) => any, pathname: `issue/${issueId}`, query: { expand: isGetChangelog ? ['changelog', 'description'] : ['description'], }, }, cfg, }); } // Complex Functions // -------- private _isMinimalSettings(settings: JiraCfg): boolean { return !!( settings && settings.host && settings.userName && settings.password && (IS_ELECTRON || this._isExtension) ); } private _sendRequest$({ jiraReqCfg, cfg, isForce = false, }: { jiraReqCfg: JiraRequestCfg; cfg: JiraCfg; isForce?: boolean; }): Observable<any> { return this._isInterfacesReadyIfNeeded$.pipe( take(1), concatMap(() => IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true), ), concatMap(() => { // assign uuid to request to know which responsive belongs to which promise const requestId = `${jiraReqCfg.pathname}__${ jiraReqCfg.method || 'GET' }__${nanoid()}`; if (!isOnline()) { this._snackService.open({ type: 'CUSTOM', msg: T.G.NO_CON, ico: 'cloud_off', }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId }); } if (!this._isMinimalSettings(cfg)) { this._snackService.open({ type: 'ERROR', msg: !IS_ELECTRON && !this._isExtension ? 
T.F.JIRA.S.EXTENSION_NOT_LOADED : T.F.JIRA.S.INSUFFICIENT_SETTINGS, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId, }); } if (this._isBlockAccess && !isForce) { console.error('Blocked Jira Access to prevent being shut out'); this._bannerService.open({ id: BannerId.JiraUnblock, msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG, svgIco: 'jira', action: { label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK, fn: () => this.unblockAccess(), }, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Blocked access to prevent being shut out ' + requestId, }); } // BUILD REQUEST START // ------------------- const requestInit = this._makeRequestInit(jiraReqCfg, cfg); const queryStr = jiraReqCfg.query ? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}` : ''; const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`; const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim(); return this._sendRequestToExecutor$( requestId, url, requestInit,
{ const options = { maxResults, fields: [ ...JIRA_ADDITIONAL_ISSUE_FIELDS, ...(cfg.storyPointFieldId ? [cfg.storyPointFieldId] : []), ], }; const searchQuery = cfg.autoAddBacklogJqlQuery; if (!searchQuery) { this._snackService.open({ type: 'ERROR', msg: T.F.JIRA.S.NO_AUTO_IMPORT_JQL, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'JiraApi: No search query for auto import', }); }
identifier_body
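The request URL in _sendRequest$ above is assembled from the host with any trailing slash stripped, the /rest/api/&lt;version&gt; base, the pathname, and a query string whose array values are joined with commas (arrayFormat: 'comma'). A standalone Go approximation of that assembly; the helper names and sample host are made up:

```go
package main

import (
	"fmt"
	"net/url"
	"sort"
	"strings"
)

// buildJiraURL mimics the URL assembly in _sendRequest$: strip a trailing
// slash from the host, append /rest/api/<version>/<pathname> and encode the
// query with array values joined by commas.
func buildJiraURL(host, version, pathname string, query map[string][]string) string {
	base := strings.TrimRight(host, "/") + "/rest/api/" + version + "/" + pathname
	if len(query) == 0 {
		return base
	}
	keys := make([]string, 0, len(query))
	for k := range query {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order for the example
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		vals := make([]string, len(query[k]))
		for i, v := range query[k] {
			vals[i] = url.QueryEscape(v)
		}
		parts = append(parts, url.QueryEscape(k)+"="+strings.Join(vals, ","))
	}
	return base + "?" + strings.Join(parts, "&")
}

func main() {
	fmt.Println(buildJiraURL("https://jira.example.com/", "2", "issue/ABC-1",
		map[string][]string{"expand": {"changelog", "description"}}))
}
```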
jira-api.service.ts
) ); } private _sendRequest$({ jiraReqCfg, cfg, isForce = false, }: { jiraReqCfg: JiraRequestCfg; cfg: JiraCfg; isForce?: boolean; }): Observable<any> { return this._isInterfacesReadyIfNeeded$.pipe( take(1), concatMap(() => IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true), ), concatMap(() => { // assign uuid to request to know which responsive belongs to which promise const requestId = `${jiraReqCfg.pathname}__${ jiraReqCfg.method || 'GET' }__${nanoid()}`; if (!isOnline()) { this._snackService.open({ type: 'CUSTOM', msg: T.G.NO_CON, ico: 'cloud_off', }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId }); } if (!this._isMinimalSettings(cfg)) { this._snackService.open({ type: 'ERROR', msg: !IS_ELECTRON && !this._isExtension ? T.F.JIRA.S.EXTENSION_NOT_LOADED : T.F.JIRA.S.INSUFFICIENT_SETTINGS, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId, }); } if (this._isBlockAccess && !isForce) { console.error('Blocked Jira Access to prevent being shut out'); this._bannerService.open({ id: BannerId.JiraUnblock, msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG, svgIco: 'jira', action: { label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK, fn: () => this.unblockAccess(), }, }); return throwError({ [HANDLED_ERROR_PROP_STR]: 'Blocked access to prevent being shut out ' + requestId, }); } // BUILD REQUEST START // ------------------- const requestInit = this._makeRequestInit(jiraReqCfg, cfg); const queryStr = jiraReqCfg.query ? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}` : ''; const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`; const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim(); return this._sendRequestToExecutor$( requestId, url, requestInit, jiraReqCfg.transform, cfg, ); // NOTE: offline is sexier & easier than cache, but in case we change our mind... // const args = [requestId, url, requestInit, jiraReqCfg.transform]; // return this._issueCacheService.cache(url, requestInit, this._sendRequestToExecutor$.bind(this), args); }), ); } private _sendRequestToExecutor$( requestId: string, url: string, requestInit: RequestInit, transform: any, jiraCfg: JiraCfg, ): Observable<any> { // TODO refactor to observable for request canceling etc let promiseResolve; let promiseReject; const promise = new Promise((resolve, reject) => { promiseResolve = resolve; promiseReject = reject; }); // save to request log (also sets up timeout) this._requestsLog[requestId] = this._makeJiraRequestLogItem({ promiseResolve, promiseReject, requestId, requestInit, transform, jiraCfg, }); const requestToSend = { requestId, requestInit, url }; if (this._electronService.isElectronApp) { (this._electronService.ipcRenderer as typeof ipcRenderer).send( IPC.JIRA_MAKE_REQUEST_EVENT, { ...requestToSend, jiraCfg, }, ); } else if (this._isExtension) { this._chromeExtensionInterfaceService.dispatchEvent( 'SP_JIRA_REQUEST', requestToSend, ); } this._globalProgressBarService.countUp(url); return fromPromise(promise).pipe( catchError((err) => { console.log(err); console.log(getErrorTxt(err)); const errTxt = `Jira: ${getErrorTxt(err)}`; this._snackService.open({ type: 'ERROR', msg: errTxt }); return throwError({ [HANDLED_ERROR_PROP_STR]: errTxt }); }), first(), finalize(() => this._globalProgressBarService.countDown()), ); } private _makeRequestInit(jr: JiraRequestCfg, cfg: JiraCfg): RequestInit { return { method: jr.method || 'GET', ...(jr.body ? 
{ body: JSON.stringify(jr.body) } : {}), headers: { 'Content-Type': 'application/json', ...(IS_ELECTRON && cfg.isWonkyCookieMode ? { Cookie: sessionStorage.getItem(SS.JIRA_WONKY_COOKIE) as string, } : cfg.usePAT ? { Cookie: '', authorization: `Bearer ${cfg.password}`, } : { Cookie: '', authorization: `Basic ${this._b64EncodeUnicode( `${cfg.userName}:${cfg.password}`, )}`, }), }, }; } private async _checkSetWonkyCookie(cfg: JiraCfg): Promise<string | null> { const ssVal = sessionStorage.getItem(SS.JIRA_WONKY_COOKIE); if (ssVal && ssVal.length > 0) { return ssVal; } else { const loginUrl = `${cfg.host}`; const apiUrl = `${cfg.host}/rest/api/${API_VERSION}/myself`; const val = await this._matDialog .open(DialogPromptComponent, { data: { // TODO add message to translations placeholder: 'Insert Cookie String', message: `<h3>Jira Wonky Cookie Authentication</h3> <ol> <li><a href="${loginUrl}">Log into Jira from your browser</a></li> <li><a href="${apiUrl}" target="_blank">Go to this api url</a></li> <li>Open up the dev tools (Ctrl+Shift+i)</li> <li>Navigate to the "Network" tab and reload page</li> <li>Click the "myself" file on the left side.</li> <li>In the "Headers" tab, scroll down and locate the "Request Headers" section.</li> <li>Locate the "cookie" header and right click to copy the value</li> <li>Fill this form with the cookie as "cookie: {paste-cookie-value}"</li> </ol>`, }, }) .afterClosed() .toPromise(); if (typeof val === 'string') { sessionStorage.setItem(SS.JIRA_WONKY_COOKIE, val); return val; } } this._blockAccess(); return null; } private _makeJiraRequestLogItem({ promiseResolve, promiseReject, requestId, requestInit, transform, jiraCfg, }: { promiseResolve: any; promiseReject: any; requestId: string; requestInit: RequestInit; transform: any; jiraCfg: JiraCfg; }): JiraRequestLogItem { return { transform, resolve: promiseResolve, reject: promiseReject, // NOTE: only needed for debug requestInit, jiraCfg, timeoutId: window.setTimeout(() => { console.log('ERROR', 'Jira Request timed out', requestInit); this._blockAccess(); // delete entry for promise this._snackService.open({ msg: T.F.JIRA.S.TIMED_OUT, type: 'ERROR', }); this._requestsLog[requestId].reject('Request timed out'); delete this._requestsLog[requestId]; }, JIRA_REQUEST_TIMEOUT_DURATION), }; } private _handleResponse(res: { requestId?: string; error?: any }): void { // check if proper id is given in callback and if exists in requestLog if (res.requestId && this._requestsLog[res.requestId]) { const currentRequest = this._requestsLog[res.requestId]; // cancel timeout for request window.clearTimeout(currentRequest.timeoutId); // resolve saved promise if (!res || res.error) { console.error('JIRA_RESPONSE_ERROR', res, currentRequest); // let msg = if ( res?.error && (res.error.statusCode === 401 || res.error === 401 || res.error.message === 'Forbidden' || res.error.message === 'Unauthorized') ) { this._blockAccess(); } currentRequest.reject(res); } else { // console.log('JIRA_RESPONSE', res); if (currentRequest.transform) { // data can be invalid, that's why we check try { currentRequest.resolve(currentRequest.transform(res, currentRequest.jiraCfg)); } catch (e) { console.log(res); console.log(currentRequest); console.error(e); this._snackService.open({ type: 'ERROR', msg: T.F.JIRA.S.INVALID_RESPONSE,
});
random_line_split
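Each request above is saved to _requestsLog under its generated requestId together with resolve/reject callbacks and a timeout that rejects the promise and deletes the entry if no IPC or extension response is routed back. A condensed standalone Go sketch of that pending-request bookkeeping; the types, names and the 50 ms timeout are illustrative, not the app's actual values:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// pendingRequest plays the role of a _requestsLog entry: a result channel
// plus the timer that fails the request if no response arrives in time.
type pendingRequest struct {
	result chan error
	timer  *time.Timer
}

type requestLog struct {
	mu      sync.Mutex
	pending map[string]*pendingRequest
}

func (l *requestLog) add(id string, timeout time.Duration) *pendingRequest {
	p := &pendingRequest{result: make(chan error, 1)}
	p.timer = time.AfterFunc(timeout, func() {
		l.resolve(id, fmt.Errorf("request %s timed out", id))
	})
	l.mu.Lock()
	l.pending[id] = p
	l.mu.Unlock()
	return p
}

// resolve delivers a response (or timeout error) exactly once and drops the entry.
func (l *requestLog) resolve(id string, err error) {
	l.mu.Lock()
	p, ok := l.pending[id]
	delete(l.pending, id)
	l.mu.Unlock()
	if ok {
		p.timer.Stop()
		p.result <- err
	}
}

func main() {
	log := &requestLog{pending: map[string]*pendingRequest{}}
	p := log.add("issue/ABC-1__GET__abc123", 50*time.Millisecond)
	fmt.Println(<-p.result) // no response routed back, so the timeout fires
}
```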
home.go
AllLimited: false, // Include also all public repositories of limited organisations Archived: util.OptionalBoolFalse, HasMilestones: util.OptionalBoolTrue, // Just needs display repos has milestones } if ctxUser.IsOrganization() && ctx.Org.Team != nil { repoOpts.TeamID = ctx.Org.Team.ID } var ( userRepoCond = repo_model.SearchRepositoryCondition(&repoOpts) // all repo condition user could visit repoCond = userRepoCond repoIDs []int64 reposQuery = ctx.FormString("repos") isShowClosed = ctx.FormString("state") == "closed" sortType = ctx.FormString("sort") page = ctx.FormInt("page") keyword = ctx.FormTrim("q") ) if page <= 1 { page = 1 } if len(reposQuery) != 0 { if issueReposQueryPattern.MatchString(reposQuery) { // remove "[" and "]" from string reposQuery = reposQuery[1 : len(reposQuery)-1] // for each ID (delimiter ",") add to int to repoIDs for _, rID := range strings.Split(reposQuery, ",") { // Ensure nonempty string entries if rID != "" && rID != "0" { rIDint64, err := strconv.ParseInt(rID, 10, 64) // If the repo id specified by query is not parseable or not accessible by user, just ignore it. if err == nil { repoIDs = append(repoIDs, rIDint64) } } } if len(repoIDs) > 0 { // Don't just let repoCond = builder.In("id", repoIDs) because user may has no permission on repoIDs // But the original repoCond has a limitation repoCond = repoCond.And(builder.In("id", repoIDs)) } } else { log.Warn("issueReposQueryPattern not match with query") } } counts, err := issues_model.CountMilestonesByRepoCondAndKw(userRepoCond, keyword, isShowClosed) if err != nil { ctx.ServerError("CountMilestonesByRepoIDs", err) return } milestones, err := issues_model.SearchMilestones(repoCond, page, isShowClosed, sortType, keyword) if err != nil { ctx.ServerError("SearchMilestones", err) return } showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false) if err != nil { ctx.ServerError("SearchRepositoryByCondition", err) return } sort.Sort(showRepos) for i := 0; i < len(milestones); { for _, repo := range showRepos { if milestones[i].RepoID == repo.ID { milestones[i].Repo = repo break } } if milestones[i].Repo == nil { log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID) milestones = append(milestones[:i], milestones[i+1:]...) 
continue } milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: milestones[i].Repo.Link(), Metas: milestones[i].Repo.ComposeMetas(), Ctx: ctx, }, milestones[i].Content) if err != nil { ctx.ServerError("RenderString", err) return } if milestones[i].Repo.IsTimetrackerEnabled(ctx) { err := milestones[i].LoadTotalTrackedTime() if err != nil { ctx.ServerError("LoadTotalTrackedTime", err) return } } i++ } milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } var totalMilestoneStats *issues_model.MilestonesStats if len(repoIDs) == 0 { totalMilestoneStats = milestoneStats } else { totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } } showRepoIds := make(container.Set[int64], len(showRepos)) for _, repo := range showRepos { if repo.ID > 0 { showRepoIds.Add(repo.ID) } } if len(repoIDs) == 0 { repoIDs = showRepoIds.Values() } repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool { return !showRepoIds.Contains(v) }) var pagerCount int if isShowClosed { ctx.Data["State"] = "closed" ctx.Data["Total"] = totalMilestoneStats.ClosedCount pagerCount = int(milestoneStats.ClosedCount) } else { ctx.Data["State"] = "open" ctx.Data["Total"] = totalMilestoneStats.OpenCount pagerCount = int(milestoneStats.OpenCount) } ctx.Data["Milestones"] = milestones ctx.Data["Repos"] = showRepos ctx.Data["Counts"] = counts ctx.Data["MilestoneStats"] = milestoneStats ctx.Data["SortType"] = sortType ctx.Data["Keyword"] = keyword ctx.Data["RepoIDs"] = repoIDs ctx.Data["IsShowClosed"] = isShowClosed pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5) pager.AddParam(ctx, "q", "Keyword") pager.AddParam(ctx, "repos", "RepoIDs") pager.AddParam(ctx, "sort", "SortType") pager.AddParam(ctx, "state", "State") ctx.Data["Page"] = pager ctx.HTML(http.StatusOK, tplMilestones) } // Pulls renders the user's pull request overview page func Pulls(ctx *context.Context) { if unit.TypePullRequests.UnitGlobalDisabled() { log.Debug("Pull request overview page not available as it is globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("pull_requests") ctx.Data["PageIsPulls"] = true ctx.Data["SingleRepoAction"] = "pull" buildIssueOverview(ctx, unit.TypePullRequests) } // Issues renders the user's issues overview page func Issues(ctx *context.Context) { if unit.TypeIssues.UnitGlobalDisabled() { log.Debug("Issues overview page not available as it is globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("issues") ctx.Data["PageIsIssues"] = true ctx.Data["SingleRepoAction"] = "issue" buildIssueOverview(ctx, unit.TypeIssues) } // Regexp for repos query var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`) func buildIssueOverview(ctx *context.Context, unitType unit.Type) { // ---------------------------------------------------- // Determine user; can be either user or organization. // Return with NotFound or ServerError if unsuccessful. 
// ---------------------------------------------------- ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return } var ( viewType string sortType = ctx.FormString("sort") filterMode int ) // Default to recently updated, unlike repository issues list if sortType == "" { sortType = "recentupdate" } // -------------------------------------------------------------------------------- // Distinguish User from Organization. // Org: // - Remember pre-determined viewType string for later. Will be posted to ctx.Data. // Organization does not have view type and filter mode. // User: // - Use ctx.FormString("type") to determine filterMode. // The type is set when clicking for example "assigned to me" on the overview page. // - Remember either this or a fallback. Will be posted to ctx.Data. // -------------------------------------------------------------------------------- // TODO: distinguish during routing viewType = ctx.FormString("type") switch viewType { case "assigned": filterMode = issues_model.FilterModeAssign case "created_by": filterMode = issues_model.FilterModeCreate case "mentioned": filterMode = issues_model.FilterModeMention
{ if unit.TypeIssues.UnitGlobalDisabled() && unit.TypePullRequests.UnitGlobalDisabled() { log.Debug("Milestones overview page not available as both issues and pull requests are globally disabled") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("milestones") ctx.Data["PageIsMilestonesDashboard"] = true ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return } repoOpts := repo_model.SearchRepoOptions{ Actor: ctx.Doer, OwnerID: ctxUser.ID, Private: true, AllPublic: false, // Include also all public repositories of users and public organisations
identifier_body
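The repos query parameter above is expected to look like `[1,2,3]`: it must match issueReposQueryPattern, the surrounding brackets are stripped, and each comma-separated ID is parsed while empty and "0" entries are skipped. A standalone Go sketch of exactly that parsing; only the helper name parseReposQuery is invented:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`)

// parseReposQuery turns a "[1,2,3]" style query value into repo IDs,
// ignoring malformed input, empty entries and the placeholder "0".
func parseReposQuery(q string) []int64 {
	if !issueReposQueryPattern.MatchString(q) {
		return nil
	}
	q = q[1 : len(q)-1] // drop "[" and "]"
	var ids []int64
	for _, part := range strings.Split(q, ",") {
		if part == "" || part == "0" {
			continue
		}
		if id, err := strconv.ParseInt(part, 10, 64); err == nil {
			ids = append(ids, id)
		}
	}
	return ids
}

func main() {
	fmt.Println(parseReposQuery("[3,7,0,42,]")) // [3 7 42]
}
```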
home.go
10, 64) // If the repo id specified by query is not parseable or not accessible by user, just ignore it. if err == nil { repoIDs = append(repoIDs, rIDint64) } } } if len(repoIDs) > 0 { // Don't just let repoCond = builder.In("id", repoIDs) because user may has no permission on repoIDs // But the original repoCond has a limitation repoCond = repoCond.And(builder.In("id", repoIDs)) } } else { log.Warn("issueReposQueryPattern not match with query") } } counts, err := issues_model.CountMilestonesByRepoCondAndKw(userRepoCond, keyword, isShowClosed) if err != nil { ctx.ServerError("CountMilestonesByRepoIDs", err) return } milestones, err := issues_model.SearchMilestones(repoCond, page, isShowClosed, sortType, keyword)
ctx.ServerError("SearchMilestones", err) return } showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false) if err != nil { ctx.ServerError("SearchRepositoryByCondition", err) return } sort.Sort(showRepos) for i := 0; i < len(milestones); { for _, repo := range showRepos { if milestones[i].RepoID == repo.ID { milestones[i].Repo = repo break } } if milestones[i].Repo == nil { log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID) milestones = append(milestones[:i], milestones[i+1:]...) continue } milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: milestones[i].Repo.Link(), Metas: milestones[i].Repo.ComposeMetas(), Ctx: ctx, }, milestones[i].Content) if err != nil { ctx.ServerError("RenderString", err) return } if milestones[i].Repo.IsTimetrackerEnabled(ctx) { err := milestones[i].LoadTotalTrackedTime() if err != nil { ctx.ServerError("LoadTotalTrackedTime", err) return } } i++ } milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } var totalMilestoneStats *issues_model.MilestonesStats if len(repoIDs) == 0 { totalMilestoneStats = milestoneStats } else { totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } } showRepoIds := make(container.Set[int64], len(showRepos)) for _, repo := range showRepos { if repo.ID > 0 { showRepoIds.Add(repo.ID) } } if len(repoIDs) == 0 { repoIDs = showRepoIds.Values() } repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool { return !showRepoIds.Contains(v) }) var pagerCount int if isShowClosed { ctx.Data["State"] = "closed" ctx.Data["Total"] = totalMilestoneStats.ClosedCount pagerCount = int(milestoneStats.ClosedCount) } else { ctx.Data["State"] = "open" ctx.Data["Total"] = totalMilestoneStats.OpenCount pagerCount = int(milestoneStats.OpenCount) } ctx.Data["Milestones"] = milestones ctx.Data["Repos"] = showRepos ctx.Data["Counts"] = counts ctx.Data["MilestoneStats"] = milestoneStats ctx.Data["SortType"] = sortType ctx.Data["Keyword"] = keyword ctx.Data["RepoIDs"] = repoIDs ctx.Data["IsShowClosed"] = isShowClosed pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5) pager.AddParam(ctx, "q", "Keyword") pager.AddParam(ctx, "repos", "RepoIDs") pager.AddParam(ctx, "sort", "SortType") pager.AddParam(ctx, "state", "State") ctx.Data["Page"] = pager ctx.HTML(http.StatusOK, tplMilestones) } // Pulls renders the user's pull request overview page func Pulls(ctx *context.Context) { if unit.TypePullRequests.UnitGlobalDisabled() { log.Debug("Pull request overview page not available as it is globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("pull_requests") ctx.Data["PageIsPulls"] = true ctx.Data["SingleRepoAction"] = "pull" buildIssueOverview(ctx, unit.TypePullRequests) } // Issues renders the user's issues overview page func Issues(ctx *context.Context) { if unit.TypeIssues.UnitGlobalDisabled() { log.Debug("Issues overview page not available as it is globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("issues") ctx.Data["PageIsIssues"] = true ctx.Data["SingleRepoAction"] = "issue" buildIssueOverview(ctx, unit.TypeIssues) } // Regexp for repos query var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`) func buildIssueOverview(ctx 
*context.Context, unitType unit.Type) { // ---------------------------------------------------- // Determine user; can be either user or organization. // Return with NotFound or ServerError if unsuccessful. // ---------------------------------------------------- ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return } var ( viewType string sortType = ctx.FormString("sort") filterMode int ) // Default to recently updated, unlike repository issues list if sortType == "" { sortType = "recentupdate" } // -------------------------------------------------------------------------------- // Distinguish User from Organization. // Org: // - Remember pre-determined viewType string for later. Will be posted to ctx.Data. // Organization does not have view type and filter mode. // User: // - Use ctx.FormString("type") to determine filterMode. // The type is set when clicking for example "assigned to me" on the overview page. // - Remember either this or a fallback. Will be posted to ctx.Data. // -------------------------------------------------------------------------------- // TODO: distinguish during routing viewType = ctx.FormString("type") switch viewType { case "assigned": filterMode = issues_model.FilterModeAssign case "created_by": filterMode = issues_model.FilterModeCreate case "mentioned": filterMode = issues_model.FilterModeMention case "review_requested": filterMode = issues_model.FilterModeReviewRequested case "reviewed_by": filterMode = issues_model.FilterModeReviewed case "your_repositories": fallthrough default: filterMode = issues_model.FilterModeYourRepositories viewType = "your_repositories" } // -------------------------------------------------------------------------- // Build opts (IssuesOptions), which contains filter information. // Will eventually be used to retrieve issues relevant for the overview page. // Note: Non-final states of opts are used in-between, namely for: // - Keyword search // - Count Issues by repo // -------------------------------------------------------------------------- // Get repository IDs where User/Org/Team has access. var team *organization.Team var org *organization.Organization if ctx.Org != nil { org = ctx.Org.Organization team = ctx.Org.Team } isPullList := unitType == unit.TypePullRequests opts := &issues_model.IssuesOptions{ IsPull: util.OptionalBoolOf(isPullList), SortType: sortType, IsArchived: util.OptionalBoolFalse, Org: org, Team: team, User: ctx.Doer, } // Search all repositories which // // As user: // - Owns the repository. // - Have collaborator permissions in repository. // // As org: // - Owns the repository. // // As team: // - Team org's owns the repository. // - Team has read permission to repository. repoOpts := &repo_model.SearchRepoOptions{ Actor: ctx.Doer, OwnerID: ctxUser.ID, Private: true, AllPublic: false, AllLimited: false, Collaborate: util.OptionalBoolNone, UnitType: unitType, Archived: util.OptionalBoolFalse, } if team != nil { repoOpts.TeamID = team.ID } accessibleRepos := container.Set[int64]{} { ids, _, err := repo_model.SearchRepositoryIDs(repoOpts)
if err != nil {
random_line_split
home.go
err != nil { ctx.ServerError("LoadTotalTrackedTime", err) return } } i++ } milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } var totalMilestoneStats *issues_model.MilestonesStats if len(repoIDs) == 0 { totalMilestoneStats = milestoneStats } else { totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } } showRepoIds := make(container.Set[int64], len(showRepos)) for _, repo := range showRepos { if repo.ID > 0 { showRepoIds.Add(repo.ID) } } if len(repoIDs) == 0 { repoIDs = showRepoIds.Values() } repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool { return !showRepoIds.Contains(v) }) var pagerCount int if isShowClosed { ctx.Data["State"] = "closed" ctx.Data["Total"] = totalMilestoneStats.ClosedCount pagerCount = int(milestoneStats.ClosedCount) } else { ctx.Data["State"] = "open" ctx.Data["Total"] = totalMilestoneStats.OpenCount pagerCount = int(milestoneStats.OpenCount) } ctx.Data["Milestones"] = milestones ctx.Data["Repos"] = showRepos ctx.Data["Counts"] = counts ctx.Data["MilestoneStats"] = milestoneStats ctx.Data["SortType"] = sortType ctx.Data["Keyword"] = keyword ctx.Data["RepoIDs"] = repoIDs ctx.Data["IsShowClosed"] = isShowClosed pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5) pager.AddParam(ctx, "q", "Keyword") pager.AddParam(ctx, "repos", "RepoIDs") pager.AddParam(ctx, "sort", "SortType") pager.AddParam(ctx, "state", "State") ctx.Data["Page"] = pager ctx.HTML(http.StatusOK, tplMilestones) } // Pulls renders the user's pull request overview page func Pulls(ctx *context.Context) { if unit.TypePullRequests.UnitGlobalDisabled() { log.Debug("Pull request overview page not available as it is globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("pull_requests") ctx.Data["PageIsPulls"] = true ctx.Data["SingleRepoAction"] = "pull" buildIssueOverview(ctx, unit.TypePullRequests) } // Issues renders the user's issues overview page func Issues(ctx *context.Context) { if unit.TypeIssues.UnitGlobalDisabled() { log.Debug("Issues overview page not available as it is globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("issues") ctx.Data["PageIsIssues"] = true ctx.Data["SingleRepoAction"] = "issue" buildIssueOverview(ctx, unit.TypeIssues) } // Regexp for repos query var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`) func buildIssueOverview(ctx *context.Context, unitType unit.Type) { // ---------------------------------------------------- // Determine user; can be either user or organization. // Return with NotFound or ServerError if unsuccessful. // ---------------------------------------------------- ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return } var ( viewType string sortType = ctx.FormString("sort") filterMode int ) // Default to recently updated, unlike repository issues list if sortType == "" { sortType = "recentupdate" } // -------------------------------------------------------------------------------- // Distinguish User from Organization. // Org: // - Remember pre-determined viewType string for later. Will be posted to ctx.Data. // Organization does not have view type and filter mode. // User: // - Use ctx.FormString("type") to determine filterMode. // The type is set when clicking for example "assigned to me" on the overview page. 
// - Remember either this or a fallback. Will be posted to ctx.Data. // -------------------------------------------------------------------------------- // TODO: distinguish during routing viewType = ctx.FormString("type") switch viewType { case "assigned": filterMode = issues_model.FilterModeAssign case "created_by": filterMode = issues_model.FilterModeCreate case "mentioned": filterMode = issues_model.FilterModeMention case "review_requested": filterMode = issues_model.FilterModeReviewRequested case "reviewed_by": filterMode = issues_model.FilterModeReviewed case "your_repositories": fallthrough default: filterMode = issues_model.FilterModeYourRepositories viewType = "your_repositories" } // -------------------------------------------------------------------------- // Build opts (IssuesOptions), which contains filter information. // Will eventually be used to retrieve issues relevant for the overview page. // Note: Non-final states of opts are used in-between, namely for: // - Keyword search // - Count Issues by repo // -------------------------------------------------------------------------- // Get repository IDs where User/Org/Team has access. var team *organization.Team var org *organization.Organization if ctx.Org != nil { org = ctx.Org.Organization team = ctx.Org.Team } isPullList := unitType == unit.TypePullRequests opts := &issues_model.IssuesOptions{ IsPull: util.OptionalBoolOf(isPullList), SortType: sortType, IsArchived: util.OptionalBoolFalse, Org: org, Team: team, User: ctx.Doer, } // Search all repositories which // // As user: // - Owns the repository. // - Have collaborator permissions in repository. // // As org: // - Owns the repository. // // As team: // - Team org's owns the repository. // - Team has read permission to repository. repoOpts := &repo_model.SearchRepoOptions{ Actor: ctx.Doer, OwnerID: ctxUser.ID, Private: true, AllPublic: false, AllLimited: false, Collaborate: util.OptionalBoolNone, UnitType: unitType, Archived: util.OptionalBoolFalse, } if team != nil { repoOpts.TeamID = team.ID } accessibleRepos := container.Set[int64]{} { ids, _, err := repo_model.SearchRepositoryIDs(repoOpts) if err != nil { ctx.ServerError("SearchRepositoryIDs", err) return } accessibleRepos.AddMultiple(ids...) opts.RepoIDs = ids if len(opts.RepoIDs) == 0 { // no repos found, don't let the indexer return all repos opts.RepoIDs = []int64{0} } } switch filterMode { case issues_model.FilterModeAll: case issues_model.FilterModeYourRepositories: case issues_model.FilterModeAssign: opts.AssigneeID = ctx.Doer.ID case issues_model.FilterModeCreate: opts.PosterID = ctx.Doer.ID case issues_model.FilterModeMention: opts.MentionedID = ctx.Doer.ID case issues_model.FilterModeReviewRequested: opts.ReviewRequestedID = ctx.Doer.ID case issues_model.FilterModeReviewed: opts.ReviewedID = ctx.Doer.ID } // keyword holds the search term entered into the search field. keyword := strings.Trim(ctx.FormString("q"), " ") ctx.Data["Keyword"] = keyword // Educated guess: Do or don't show closed issues. isShowClosed := ctx.FormString("state") == "closed" opts.IsClosed = util.OptionalBoolOf(isShowClosed) // Filter repos and count issues in them. Count will be used later. // USING NON-FINAL STATE OF opts FOR A QUERY. issueCountByRepo, err := issue_indexer.CountIssuesByRepo(ctx, issue_indexer.ToSearchOptions(keyword, opts)) if err != nil { ctx.ServerError("CountIssuesByRepo", err) return } // Make sure page number is at least 1. Will be posted to ctx.Data. 
page := ctx.FormInt("page") if page <= 1 { page = 1 } opts.Paginator = &db.ListOptions{ Page: page, PageSize: setting.UI.IssuePagingNum, } // Get IDs for labels (a filter option for issues/pulls). // Required for IssuesOptions. var labelIDs []int64 selectedLabels := ctx.FormString("labels") if len(selectedLabels) > 0 && selectedLabels != "0" { var err error labelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ",")) if err != nil
{ ctx.ServerError("StringsToInt64s", err) return }
conditional_block
home.go
10, 64) // If the repo id specified by query is not parseable or not accessible by user, just ignore it. if err == nil { repoIDs = append(repoIDs, rIDint64) } } } if len(repoIDs) > 0 { // Don't just let repoCond = builder.In("id", repoIDs) because user may has no permission on repoIDs // But the original repoCond has a limitation repoCond = repoCond.And(builder.In("id", repoIDs)) } } else { log.Warn("issueReposQueryPattern not match with query") } } counts, err := issues_model.CountMilestonesByRepoCondAndKw(userRepoCond, keyword, isShowClosed) if err != nil { ctx.ServerError("CountMilestonesByRepoIDs", err) return } milestones, err := issues_model.SearchMilestones(repoCond, page, isShowClosed, sortType, keyword) if err != nil { ctx.ServerError("SearchMilestones", err) return } showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false) if err != nil { ctx.ServerError("SearchRepositoryByCondition", err) return } sort.Sort(showRepos) for i := 0; i < len(milestones); { for _, repo := range showRepos { if milestones[i].RepoID == repo.ID { milestones[i].Repo = repo break } } if milestones[i].Repo == nil { log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID) milestones = append(milestones[:i], milestones[i+1:]...) continue } milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: milestones[i].Repo.Link(), Metas: milestones[i].Repo.ComposeMetas(), Ctx: ctx, }, milestones[i].Content) if err != nil { ctx.ServerError("RenderString", err) return } if milestones[i].Repo.IsTimetrackerEnabled(ctx) { err := milestones[i].LoadTotalTrackedTime() if err != nil { ctx.ServerError("LoadTotalTrackedTime", err) return } } i++ } milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } var totalMilestoneStats *issues_model.MilestonesStats if len(repoIDs) == 0 { totalMilestoneStats = milestoneStats } else { totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword) if err != nil { ctx.ServerError("GetMilestoneStats", err) return } } showRepoIds := make(container.Set[int64], len(showRepos)) for _, repo := range showRepos { if repo.ID > 0 { showRepoIds.Add(repo.ID) } } if len(repoIDs) == 0 { repoIDs = showRepoIds.Values() } repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool { return !showRepoIds.Contains(v) }) var pagerCount int if isShowClosed { ctx.Data["State"] = "closed" ctx.Data["Total"] = totalMilestoneStats.ClosedCount pagerCount = int(milestoneStats.ClosedCount) } else { ctx.Data["State"] = "open" ctx.Data["Total"] = totalMilestoneStats.OpenCount pagerCount = int(milestoneStats.OpenCount) } ctx.Data["Milestones"] = milestones ctx.Data["Repos"] = showRepos ctx.Data["Counts"] = counts ctx.Data["MilestoneStats"] = milestoneStats ctx.Data["SortType"] = sortType ctx.Data["Keyword"] = keyword ctx.Data["RepoIDs"] = repoIDs ctx.Data["IsShowClosed"] = isShowClosed pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5) pager.AddParam(ctx, "q", "Keyword") pager.AddParam(ctx, "repos", "RepoIDs") pager.AddParam(ctx, "sort", "SortType") pager.AddParam(ctx, "state", "State") ctx.Data["Page"] = pager ctx.HTML(http.StatusOK, tplMilestones) } // Pulls renders the user's pull request overview page func Pulls(ctx *context.Context) { if unit.TypePullRequests.UnitGlobalDisabled() { log.Debug("Pull request overview page not available as it is 
globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("pull_requests") ctx.Data["PageIsPulls"] = true ctx.Data["SingleRepoAction"] = "pull" buildIssueOverview(ctx, unit.TypePullRequests) } // Issues renders the user's issues overview page func Issues(ctx *context.Context) { if unit.TypeIssues.UnitGlobalDisabled() { log.Debug("Issues overview page not available as it is globally disabled.") ctx.Status(http.StatusNotFound) return } ctx.Data["Title"] = ctx.Tr("issues") ctx.Data["PageIsIssues"] = true ctx.Data["SingleRepoAction"] = "issue" buildIssueOverview(ctx, unit.TypeIssues) } // Regexp for repos query var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`) func
(ctx *context.Context, unitType unit.Type) { // ---------------------------------------------------- // Determine user; can be either user or organization. // Return with NotFound or ServerError if unsuccessful. // ---------------------------------------------------- ctxUser := getDashboardContextUser(ctx) if ctx.Written() { return } var ( viewType string sortType = ctx.FormString("sort") filterMode int ) // Default to recently updated, unlike repository issues list if sortType == "" { sortType = "recentupdate" } // -------------------------------------------------------------------------------- // Distinguish User from Organization. // Org: // - Remember pre-determined viewType string for later. Will be posted to ctx.Data. // Organization does not have view type and filter mode. // User: // - Use ctx.FormString("type") to determine filterMode. // The type is set when clicking for example "assigned to me" on the overview page. // - Remember either this or a fallback. Will be posted to ctx.Data. // -------------------------------------------------------------------------------- // TODO: distinguish during routing viewType = ctx.FormString("type") switch viewType { case "assigned": filterMode = issues_model.FilterModeAssign case "created_by": filterMode = issues_model.FilterModeCreate case "mentioned": filterMode = issues_model.FilterModeMention case "review_requested": filterMode = issues_model.FilterModeReviewRequested case "reviewed_by": filterMode = issues_model.FilterModeReviewed case "your_repositories": fallthrough default: filterMode = issues_model.FilterModeYourRepositories viewType = "your_repositories" } // -------------------------------------------------------------------------- // Build opts (IssuesOptions), which contains filter information. // Will eventually be used to retrieve issues relevant for the overview page. // Note: Non-final states of opts are used in-between, namely for: // - Keyword search // - Count Issues by repo // -------------------------------------------------------------------------- // Get repository IDs where User/Org/Team has access. var team *organization.Team var org *organization.Organization if ctx.Org != nil { org = ctx.Org.Organization team = ctx.Org.Team } isPullList := unitType == unit.TypePullRequests opts := &issues_model.IssuesOptions{ IsPull: util.OptionalBoolOf(isPullList), SortType: sortType, IsArchived: util.OptionalBoolFalse, Org: org, Team: team, User: ctx.Doer, } // Search all repositories which // // As user: // - Owns the repository. // - Have collaborator permissions in repository. // // As org: // - Owns the repository. // // As team: // - Team org's owns the repository. // - Team has read permission to repository. repoOpts := &repo_model.SearchRepoOptions{ Actor: ctx.Doer, OwnerID: ctxUser.ID, Private: true, AllPublic: false, AllLimited: false, Collaborate: util.OptionalBoolNone, UnitType: unitType, Archived: util.OptionalBoolFalse, } if team != nil { repoOpts.TeamID = team.ID } accessibleRepos := container.Set[int64]{} { ids, _, err := repo_model.SearchRepositoryIDs(repoOpts)
buildIssueOverview
identifier_name
lib.rs
/// Update read buffer fn check(&mut self) -> Poll<(), std::io::Error> { loop { // Why do I have a loop here? I forgot?? self.read_buffer.reserve(512); let n = try_ready!(self.socket.read_buf(&mut self.read_buffer)); if n == 0 { return Ok(Async::Ready(())); } } } } /// Encode a message and place it in a write buffer pub fn compose_message(from: usize, msg: String) -> BytesMut { let mut write_buffer = BytesMut::new(); write_buffer.reserve(512); write_buffer.put_u16_be(from as u16); write_buffer.put_i64_be(chrono::Local::now().timestamp()); write_buffer.put_u16_be(msg.len() as u16); write_buffer.put(msg); write_buffer } /// Message header #[derive(Clone, Debug)] pub struct SprinklerProtoHeader { id: u16, timestamp: i64, len: u16 } #[cfg(feature = "master")] impl Stream for SprinklerProto { type Item = SprinklerProtoHeader; type Error = std::io::Error; fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { let sock_closed = self.check()?.is_ready(); if self.read_buffer.len() > 12 { Ok(Async::Ready(Some(SprinklerProtoHeader { id: BigEndian::read_u16(&self.read_buffer.split_to(2)), timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64, len: BigEndian::read_u16(&self.read_buffer.split_to(2)) }))) } else { if sock_closed { Ok(Async::Ready(None)) } else { Ok(Async::NotReady) } } } } #[derive(Clone)] pub enum Transmitter<T> { /// Synchronous Sender Synchronous(std::sync::mpsc::Sender<T>), /// Asynchronous Sender
pub fn send(&self, t: T) -> Result<(), ()> { match self { Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()), Transmitter::Asynchronous(sender) => { tokio::spawn({ let sender = sender.clone(); sender.send(t).into_future().map(|_| ()).map_err(|_| ()) }); Ok(()) } } } } #[derive(Clone)] pub struct Switch { pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>> } impl Switch { pub fn new() -> Self { Switch { inner: Arc::new(Mutex::new(HashMap::new())) } } pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) { let mut switch_init = self.inner.lock().unwrap(); for i in sprinklers { match i.activate_master() { ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); }, ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); } } } } } /// Message relay between master threads and TCP sockets connected to remote agents #[cfg(feature = "master")] pub struct SprinklerRelay { pub proto: SprinklerProto, pub header: SprinklerProtoHeader, pub switch: Switch } #[cfg(feature = "master")] impl Future for SprinklerRelay { type Item = (); type Error = std::io::Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { let sock_closed = self.proto.check()?.is_ready(); if self.proto.read_buffer.len() >= self.header.len as usize { if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) { if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) { if let Err(_) = tx.send(Message{ timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0), body: msgbody }) { warn!("Failed to relay the message."); } } Ok(Async::Ready(())) } else { warn!("Failed to decode message."); Ok(Async::Ready(())) } } else { if sock_closed { warn!("Message was lost."); Ok(Async::Ready(())) } else { Ok(Async::NotReady) } } } } pub enum ActivationResult { /// A realtime algorithm based master thread that monitors agent threads RealtimeMonitor(std::sync::mpsc::Sender<Message>), /// An asynchronous master thread that monitors agent threads AsyncMonitor(futures::sync::mpsc::Sender<Message>) } /// DoS prevention mechanisms, which are consisted of distributed agent threads monitored by master threads, identifiable by a systemwide id. /// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads, /// so that there will not be a single point of failure. /// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies. /// The systemwide configuration is done by replicating the same config file and executable. pub trait Sprinkler { /// Build a new sprinkler fn build(options: SprinklerOptions) -> Self where Self: Sized; /// Get systemwide id fn id(&self) -> usize; /// Get the hostname, where the agent would be deployed fn hostname(&self) -> &str; /// Start the master thread, returning a sender (to the master thread) on a intraprocess communication channel fn activate_master(&self) -> ActivationResult; /// Start the agent thread fn activate_agent(&self); /// Kill the master thread. Note: there is no way to reach out and kill any agent threads. 
fn deactivate(&self); } /// Sprinkler thread level message format #[derive(Clone)] pub struct Message { pub timestamp: NaiveDateTime, pub body: String } #[derive(PartialEq, Debug, Copy, Clone)] pub enum Anomaly { Negative, // No anomaly has been detected Positive, // Anomaly has occurred Fixing(usize), // Has attempted to intervene N times OutOfControl // Has given up trying because the programmed strategy will not work } impl Anomaly { pub fn get_retry_unchecked(&self) -> usize { match self { Anomaly::Negative | Anomaly::Positive => 0, Anomaly::Fixing(n) => *n, Anomaly::OutOfControl => std::usize::MAX } } pub fn escalate(&self, max_retry: usize) -> AnomalyTransition { match self { Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(), Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(), Anomaly::Fixing(n) => if *n < max_retry { AnomalyTransition::Fixing } else { (*self >> Anomaly::OutOfControl).unwrap() }, Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(), } } pub fn diminish(&self) -> AnomalyTransition { (*self >> Anomaly::Negative).unwrap() } } #[derive(PartialEq, Debug, Copy, Clone)] pub enum AnomalyTransition { Normal, // Negative -> Negative Occurred, // Negative -> Positive Unhandled, // Positive -> Positive Disappeared, // Positive | OutOfControl -> Negative Fixed, // Fixing(_) -> Negative Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1) GaveUp, // Fixing(m) -> OutOfControl HasGivenUp // OutOfControl -> OutOfControl } use std::ops::Shr; use std::ops::ShrAssign; impl Shr for Anomaly { type Output = Option<AnomalyTransition>; fn shr(self, rhs: Self) -> Option<AnomalyTransition> { match (self, rhs) { (Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal), (Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred), (Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled), (Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared), (Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing), (Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing), (Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed), (Anomaly::Fix
Asynchronous(futures::sync::mpsc::Sender<T>) } impl<T> Transmitter<T> where T: 'static + Send { /// Send a message through the underlying Sender
random_line_split
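// --- Illustrative sketch (not part of the original lib.rs) ---
// A minimal, std-only sketch of the wire format implied by compose_message()
// and the SprinklerProto Stream impl above: a 12-byte big-endian header
// (u16 sender id, i64 timestamp, u16 body length) followed by the UTF-8 body.
// The helper names encode_frame/decode_header are assumptions made for this
// example; the crate itself uses the bytes/byteorder APIs instead.

fn encode_frame(from: u16, timestamp: i64, msg: &str) -> Vec<u8> {
    let mut buf = Vec::with_capacity(12 + msg.len());
    buf.extend_from_slice(&from.to_be_bytes());
    buf.extend_from_slice(&timestamp.to_be_bytes());
    buf.extend_from_slice(&(msg.len() as u16).to_be_bytes());
    buf.extend_from_slice(msg.as_bytes());
    buf
}

fn decode_header(buf: &[u8]) -> Option<(u16, i64, u16)> {
    if buf.len() < 12 {
        return None; // header not complete yet; keep reading
    }
    let id = u16::from_be_bytes([buf[0], buf[1]]);
    let ts = i64::from_be_bytes([
        buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9],
    ]);
    let len = u16::from_be_bytes([buf[10], buf[11]]);
    Some((id, ts, len))
}

fn main() {
    let frame = encode_frame(7, 1_700_000_000, "anomaly detected");
    let (id, ts, len) = decode_header(&frame).unwrap();
    assert_eq!((id, ts, len as usize), (7, 1_700_000_000, "anomaly detected".len()));
    println!("body = {:?}", std::str::from_utf8(&frame[12..12 + len as usize]));
}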
lib.rs
} #[cfg(feature = "master")] type EncryptedStream = tokio_tls::TlsStream<TcpStream>; /// A TCP stream adapter to convert between byte stream and objects #[cfg(feature = "master")] #[derive(Debug)] pub struct SprinklerProto { socket: EncryptedStream, read_buffer: BytesMut, } #[cfg(feature = "master")] impl SprinklerProto { pub fn new(socket: EncryptedStream) -> Self { SprinklerProto { socket, read_buffer: BytesMut::new(), } } /// Update read buffer fn check(&mut self) -> Poll<(), std::io::Error> { loop { // Why do I have a loop here? I forgot?? self.read_buffer.reserve(512); let n = try_ready!(self.socket.read_buf(&mut self.read_buffer)); if n == 0 { return Ok(Async::Ready(())); } } } } /// Encode a message and place it in a write buffer pub fn compose_message(from: usize, msg: String) -> BytesMut { let mut write_buffer = BytesMut::new(); write_buffer.reserve(512); write_buffer.put_u16_be(from as u16); write_buffer.put_i64_be(chrono::Local::now().timestamp()); write_buffer.put_u16_be(msg.len() as u16); write_buffer.put(msg); write_buffer } /// Message header #[derive(Clone, Debug)] pub struct SprinklerProtoHeader { id: u16, timestamp: i64, len: u16 } #[cfg(feature = "master")] impl Stream for SprinklerProto { type Item = SprinklerProtoHeader; type Error = std::io::Error; fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { let sock_closed = self.check()?.is_ready(); if self.read_buffer.len() > 12 { Ok(Async::Ready(Some(SprinklerProtoHeader { id: BigEndian::read_u16(&self.read_buffer.split_to(2)), timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64, len: BigEndian::read_u16(&self.read_buffer.split_to(2)) }))) } else { if sock_closed { Ok(Async::Ready(None)) } else { Ok(Async::NotReady) } } } } #[derive(Clone)] pub enum Transmitter<T> { /// Synchronous Sender Synchronous(std::sync::mpsc::Sender<T>), /// Asynchronous Sender Asynchronous(futures::sync::mpsc::Sender<T>) } impl<T> Transmitter<T> where T: 'static + Send { /// Send a message through the underlying Sender pub fn send(&self, t: T) -> Result<(), ()> { match self { Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()), Transmitter::Asynchronous(sender) => { tokio::spawn({ let sender = sender.clone(); sender.send(t).into_future().map(|_| ()).map_err(|_| ()) }); Ok(()) } } } } #[derive(Clone)] pub struct Switch { pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>> } impl Switch { pub fn new() -> Self { Switch { inner: Arc::new(Mutex::new(HashMap::new())) } } pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) { let mut switch_init = self.inner.lock().unwrap(); for i in sprinklers { match i.activate_master() { ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); }, ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); } } } } } /// Message relay between master threads and TCP sockets connected to remote agents #[cfg(feature = "master")] pub struct SprinklerRelay { pub proto: SprinklerProto, pub header: SprinklerProtoHeader, pub switch: Switch } #[cfg(feature = "master")] impl Future for SprinklerRelay { type Item = (); type Error = std::io::Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { let sock_closed = self.proto.check()?.is_ready(); if self.proto.read_buffer.len() >= self.header.len as usize { if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) { if let Some(tx) = 
self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) { if let Err(_) = tx.send(Message{ timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0), body: msgbody }) { warn!("Failed to relay the message."); } } Ok(Async::Ready(())) } else { warn!("Failed to decode message."); Ok(Async::Ready(())) } } else { if sock_closed { warn!("Message was lost."); Ok(Async::Ready(())) } else { Ok(Async::NotReady) } } } } pub enum ActivationResult { /// A realtime algorithm based master thread that monitors agent threads RealtimeMonitor(std::sync::mpsc::Sender<Message>), /// An asynchronous master thread that monitors agent threads AsyncMonitor(futures::sync::mpsc::Sender<Message>) } /// DoS prevention mechanisms, which are consisted of distributed agent threads monitored by master threads, identifiable by a systemwide id. /// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads, /// so that there will not be a single point of failure. /// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies. /// The systemwide configuration is done by replicating the same config file and executable. pub trait Sprinkler { /// Build a new sprinkler fn build(options: SprinklerOptions) -> Self where Self: Sized; /// Get systemwide id fn id(&self) -> usize; /// Get the hostname, where the agent would be deployed fn hostname(&self) -> &str; /// Start the master thread, returning a sender (to the master thread) on a intraprocess communication channel fn activate_master(&self) -> ActivationResult; /// Start the agent thread fn activate_agent(&self); /// Kill the master thread. Note: there is no way to reach out and kill any agent threads. fn deactivate(&self); } /// Sprinkler thread level message format #[derive(Clone)] pub struct Message { pub timestamp: NaiveDateTime, pub body: String } #[derive(PartialEq, Debug, Copy, Clone)] pub enum Anomaly { Negative, // No anomaly has been detected Positive, // Anomaly has occurred Fixing(usize), // Has attempted to intervene N times OutOfControl // Has given up trying because the programmed strategy will not work } impl Anomaly { pub fn get_retry_unchecked(&self) -> usize { match self { Anomaly::Negative | Anomaly::Positive => 0, Anomaly::Fixing(n) => *n, Anomaly::OutOfControl => std::usize::MAX } } pub fn escalate(&self, max_retry: usize) -> AnomalyTransition { match self { Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(), Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(), Anomaly::Fixing(n) => if *n < max_retry { AnomalyTransition::Fixing } else { (*self >> Anomaly::OutOfControl).unwrap() }, Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(), } } pub fn diminish(&self) -> AnomalyTransition { (*self >> Anomaly::Negative).unwrap() } } #[derive(PartialEq, Debug, Copy, Clone)] pub enum AnomalyTransition { Normal, // Negative -> Negative Occurred, // Negative -> Positive Unhandled, // Positive -> Positive Disappeared, // Positive | OutOfControl -> Negative Fixed, // Fixing(_) -> Negative Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1) GaveUp, // Fixing(m) -> OutOfControl HasGivenUp // OutOfControl -> OutOfControl } use std::ops::Shr; use std::ops::ShrAssign; impl Shr for Anomaly { type Output = Option<AnomalyTransition>; fn shr(self, rhs: Self) -> Option<AnomalyTransition> { match (self, rhs) { (
{ let next = self.counter; self.counter += 1; T::build(SprinklerOptions { _id: next, _hostname: hostname, ..self.params.clone() }) }
identifier_body
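// --- Illustrative sketch (not part of the original lib.rs) ---
// The Transmitter<T> enum above hides two channel flavours (std::sync::mpsc
// and futures::sync::mpsc) behind a single send(). This is a simplified,
// std-only sketch of that enum-dispatch pattern; the async arm is replaced by
// a bounded SyncSender purely so the example runs without tokio/futures.

use std::sync::mpsc;

enum Tx<T> {
    Unbounded(mpsc::Sender<T>),
    Bounded(mpsc::SyncSender<T>),
}

impl<T> Tx<T> {
    fn send(&self, t: T) -> Result<(), ()> {
        match self {
            Tx::Unbounded(s) => s.send(t).map_err(|_| ()),
            Tx::Bounded(s) => s.send(t).map_err(|_| ()),
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let (stx, srx) = mpsc::sync_channel(4);
    // a Switch-like registry would store these under sprinkler ids
    let senders = vec![Tx::Unbounded(tx), Tx::Bounded(stx)];
    for (i, s) in senders.iter().enumerate() {
        s.send(format!("message {}", i)).unwrap();
    }
    assert_eq!(rx.recv().unwrap(), "message 0");
    assert_eq!(srx.recv().unwrap(), "message 1");
}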
lib.rs
Update read buffer fn check(&mut self) -> Poll<(), std::io::Error> { loop { // Why do I have a loop here? I forgot?? self.read_buffer.reserve(512); let n = try_ready!(self.socket.read_buf(&mut self.read_buffer)); if n == 0 { return Ok(Async::Ready(())); } } } } /// Encode a message and place it in a write buffer pub fn compose_message(from: usize, msg: String) -> BytesMut { let mut write_buffer = BytesMut::new(); write_buffer.reserve(512); write_buffer.put_u16_be(from as u16); write_buffer.put_i64_be(chrono::Local::now().timestamp()); write_buffer.put_u16_be(msg.len() as u16); write_buffer.put(msg); write_buffer } /// Message header #[derive(Clone, Debug)] pub struct SprinklerProtoHeader { id: u16, timestamp: i64, len: u16 } #[cfg(feature = "master")] impl Stream for SprinklerProto { type Item = SprinklerProtoHeader; type Error = std::io::Error; fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { let sock_closed = self.check()?.is_ready(); if self.read_buffer.len() > 12 { Ok(Async::Ready(Some(SprinklerProtoHeader { id: BigEndian::read_u16(&self.read_buffer.split_to(2)), timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64, len: BigEndian::read_u16(&self.read_buffer.split_to(2)) }))) } else { if sock_closed { Ok(Async::Ready(None)) } else { Ok(Async::NotReady) } } } } #[derive(Clone)] pub enum Transmitter<T> { /// Synchronous Sender Synchronous(std::sync::mpsc::Sender<T>), /// Asynchronous Sender Asynchronous(futures::sync::mpsc::Sender<T>) } impl<T> Transmitter<T> where T: 'static + Send { /// Send a message through the underlying Sender pub fn send(&self, t: T) -> Result<(), ()> { match self { Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()), Transmitter::Asynchronous(sender) => { tokio::spawn({ let sender = sender.clone(); sender.send(t).into_future().map(|_| ()).map_err(|_| ()) }); Ok(()) } } } } #[derive(Clone)] pub struct Switch { pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>> } impl Switch { pub fn new() -> Self { Switch { inner: Arc::new(Mutex::new(HashMap::new())) } } pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) { let mut switch_init = self.inner.lock().unwrap(); for i in sprinklers { match i.activate_master() { ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); }, ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); } } } } } /// Message relay between master threads and TCP sockets connected to remote agents #[cfg(feature = "master")] pub struct SprinklerRelay { pub proto: SprinklerProto, pub header: SprinklerProtoHeader, pub switch: Switch } #[cfg(feature = "master")] impl Future for SprinklerRelay { type Item = (); type Error = std::io::Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { let sock_closed = self.proto.check()?.is_ready(); if self.proto.read_buffer.len() >= self.header.len as usize { if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) { if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) { if let Err(_) = tx.send(Message{ timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0), body: msgbody }) { warn!("Failed to relay the message."); } } Ok(Async::Ready(())) } else { warn!("Failed to decode message."); Ok(Async::Ready(())) } } else { if sock_closed { warn!("Message was lost."); Ok(Async::Ready(())) } else { Ok(Async::NotReady) } } } } pub enum ActivationResult { /// A 
realtime algorithm based master thread that monitors agent threads RealtimeMonitor(std::sync::mpsc::Sender<Message>), /// An asynchronous master thread that monitors agent threads AsyncMonitor(futures::sync::mpsc::Sender<Message>) } /// DoS prevention mechanisms, which are consisted of distributed agent threads monitored by master threads, identifiable by a systemwide id. /// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads, /// so that there will not be a single point of failure. /// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies. /// The systemwide configuration is done by replicating the same config file and executable. pub trait Sprinkler { /// Build a new sprinkler fn build(options: SprinklerOptions) -> Self where Self: Sized; /// Get systemwide id fn id(&self) -> usize; /// Get the hostname, where the agent would be deployed fn hostname(&self) -> &str; /// Start the master thread, returning a sender (to the master thread) on a intraprocess communication channel fn activate_master(&self) -> ActivationResult; /// Start the agent thread fn activate_agent(&self); /// Kill the master thread. Note: there is no way to reach out and kill any agent threads. fn deactivate(&self); } /// Sprinkler thread level message format #[derive(Clone)] pub struct Message { pub timestamp: NaiveDateTime, pub body: String } #[derive(PartialEq, Debug, Copy, Clone)] pub enum Anomaly { Negative, // No anomaly has been detected Positive, // Anomaly has occurred Fixing(usize), // Has attempted to intervene N times OutOfControl // Has given up trying because the programmed strategy will not work } impl Anomaly { pub fn get_retry_unchecked(&self) -> usize { match self { Anomaly::Negative | Anomaly::Positive => 0, Anomaly::Fixing(n) => *n, Anomaly::OutOfControl => std::usize::MAX } } pub fn escalate(&self, max_retry: usize) -> AnomalyTransition { match self { Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(), Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(), Anomaly::Fixing(n) => if *n < max_retry { AnomalyTransition::Fixing } else { (*self >> Anomaly::OutOfControl).unwrap() }, Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(), } } pub fn
(&self) -> AnomalyTransition { (*self >> Anomaly::Negative).unwrap() } } #[derive(PartialEq, Debug, Copy, Clone)] pub enum AnomalyTransition { Normal, // Negative -> Negative Occurred, // Negative -> Positive Unhandled, // Positive -> Positive Disappeared, // Positive | OutOfControl -> Negative Fixed, // Fixing(_) -> Negative Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1) GaveUp, // Fixing(m) -> OutOfControl HasGivenUp // OutOfControl -> OutOfControl } use std::ops::Shr; use std::ops::ShrAssign; impl Shr for Anomaly { type Output = Option<AnomalyTransition>; fn shr(self, rhs: Self) -> Option<AnomalyTransition> { match (self, rhs) { (Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal), (Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred), (Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled), (Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared), (Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing), (Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing), (Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed), (An
diminish
identifier_name
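// --- Illustrative sketch (not part of the original lib.rs) ---
// The Anomaly/AnomalyTransition pair above overloads `>>` so that
// `old_state >> new_state` names the transition between two states, and
// escalate()/diminish() pick the next state from that table. A reduced,
// self-contained version of the idea (fewer states, no OutOfControl) follows.

use std::ops::Shr;

#[derive(PartialEq, Debug, Copy, Clone)]
enum State { Negative, Positive, Fixing(usize) }

#[derive(PartialEq, Debug, Copy, Clone)]
enum Transition { Normal, Occurred, Unhandled, Fixing, Fixed }

impl Shr for State {
    type Output = Option<Transition>;
    fn shr(self, rhs: Self) -> Option<Transition> {
        match (self, rhs) {
            (State::Negative, State::Negative) => Some(Transition::Normal),
            (State::Negative, State::Positive) => Some(Transition::Occurred),
            (State::Positive, State::Positive) => Some(Transition::Unhandled),
            (State::Positive, State::Fixing(1)) => Some(Transition::Fixing),
            (State::Fixing(i), State::Fixing(j)) if i + 1 == j => Some(Transition::Fixing),
            (State::Fixing(_), State::Negative) => Some(Transition::Fixed),
            _ => None, // transition not modelled in this sketch
        }
    }
}

fn main() {
    assert_eq!(State::Negative >> State::Positive, Some(Transition::Occurred));
    assert_eq!(State::Positive >> State::Fixing(1), Some(Transition::Fixing));
    assert_eq!(State::Fixing(1) >> State::Fixing(2), Some(Transition::Fixing));
    assert_eq!(State::Fixing(2) >> State::Negative, Some(Transition::Fixed));
}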
lib.rs
TOTAL_SYSTEM_MEMORY), format_size(total_memory) ) .unwrap(); } if total_swap > 0 { write!( buf, " swap={}->{}", format_size(*TOTAL_SYSTEM_SWAP), format_size(total_swap) ) .unwrap(); } if nr_cpus > 0 { write!(buf, " cpus={}->{}", *NR_SYSTEM_CPUS, nr_cpus).unwrap(); } if buf.len() > 0 { info!("System configuration overrides:{}", &buf); } } pub fn to_gb<T>(size: T) -> f64 where T: num::ToPrimitive, { let size_f64 = size.to_f64().unwrap(); size_f64 / (1 << 30) as f64 } pub fn to_mb<T>(size: T) -> f64 where T: num::ToPrimitive, { let size_f64 = size.to_f64().unwrap(); size_f64 / (1 << 20) as f64 } pub fn to_kb<T>(size: T) -> f64 where T: num::ToPrimitive, { let size_f64 = size.to_f64().unwrap(); size_f64 / (1 << 10) as f64 } pub fn scale_ratio<T>(ratio: f64, (left, mid, right): (T, T, T)) -> T where T: PartialOrd + num::FromPrimitive + num::ToPrimitive, { let (left_f64, mid_f64, right_f64) = ( left.to_f64().unwrap(), mid.to_f64().unwrap(), right.to_f64().unwrap(), ); let v = if ratio < 0.5 { left_f64 + (mid_f64 - left_f64) * ratio / 0.5 } else { mid_f64 + (right_f64 - mid_f64) * (ratio - 0.5) / 0.5 }; num::clamp(T::from_f64(v).unwrap(), left, right) } fn format_size_internal<T>(size: T, zero: &str) -> String where T: num::ToPrimitive, { let format_size_helper = |size: u64, shift: u32, suffix: &str| -> Option<String> { let unit: u64 = 1 << shift; if size < unit { Some(zero.to_string()) } else if size < 100 * unit { Some(format!("{:.1}{}", size as f64 / unit as f64, suffix)) } else if size < 1024 * unit { Some(format!("{:}{}", size / unit, suffix)) } else { None } }; let size = size.to_u64().unwrap(); format_size_helper(size, 0, "B") .or_else(|| format_size_helper(size, 10, "K")) .or_else(|| format_size_helper(size, 20, "M")) .or_else(|| format_size_helper(size, 30, "G")) .or_else(|| format_size_helper(size, 40, "P")) .or_else(|| format_size_helper(size, 50, "E")) .unwrap_or_else(|| "INF".into()) } pub fn format_size<T>(size: T) -> String where T: num::ToPrimitive, { format_size_internal(size, "0") } pub fn format_size_dashed<T>(size: T) -> String where T: num::ToPrimitive, { format_size_internal(size, "-") } fn format_duration_internal(dur: f64, zero: &str) -> String { let format_nsecs_helper = |nsecs: u64, unit: u64, max: u64, suffix: &str| -> Option<String> { if nsecs < unit { Some(zero.to_string()) } else if nsecs < 100 * unit { Some(format!("{:.1}{}", nsecs as f64 / unit as f64, suffix)) } else if nsecs < max * unit { Some(format!("{:}{}", nsecs / unit, suffix)) } else { None } }; let nsecs = (dur * 1_000_000_000.0).round() as u64; format_nsecs_helper(nsecs, 10_u64.pow(0), 1000, "n") .or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(3), 1000, "u")) .or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(6), 1000, "m")) .or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9), 60, "s")) .or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60, 60, "M")) .or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60, 24, "H")) .or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24, 365, "D")) .or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24 * 365, 1000, "Y")) .unwrap_or_else(|| "INF".into()) } pub fn format_duration(dur: f64) -> String { format_duration_internal(dur, "0") } pub fn format_duration_dashed(dur: f64) -> String { format_duration_internal(dur, "-") } fn format_pct_internal(ratio: f64, zero: &str) -> String { if ratio == 0.0 { zero.to_string() } else if ratio > 0.99 && ratio <= 9.99 { format!("{:3.0}", ratio * 100.0) } else if ratio > 9.99 { "INF".into() } else 
{ format!("{:.01}", ratio * 100.0) } } pub fn format_pct(ratio: f64) -> String { format_pct_internal(ratio, "0") } pub fn format_pct_dashed(ratio: f64) -> String { format_pct_internal(ratio, "-") } pub fn parse_duration(input: &str) -> Result<f64> { lazy_static::lazy_static! { static ref UNITS: HashMap<char, f64> = [ ('n', 0.000_000_001), ('u', 0.000_001), ('m', 0.001), ('s', 1.0), ('M', 60.0), ('H', 3600.0), ('D', 3600.0 * 24.0), ('Y', 3600.0 * 24.0 * 365.0), ] .iter() .cloned() .collect(); } let mut num = String::new(); let mut sum = 0.0; for ch in input.chars() { if UNITS.contains_key(&ch) { sum += num.trim().parse::<f64>()? * UNITS[&ch]; num.clear(); } else { num.push(ch); } } if num.trim().len() > 0 { sum += num.trim().parse::<f64>()?; } Ok(sum) } fn is_executable<P: AsRef<Path>>(path_in: P) -> bool { let path = path_in.as_ref(); match path.metadata() { Ok(md) => md.is_file() && md.mode() & 0o111 != 0, Err(_) => false, } } pub fn exe_dir() -> Result<PathBuf> { let mut path = env::current_exe()?; path.pop(); Ok(path) } pub fn find_bin<N: AsRef<OsStr>, P: AsRef<OsStr>>( name_in: N, prepend_in: Option<P>, ) -> Option<PathBuf> { let name = name_in.as_ref(); let mut search = OsString::new(); if let Some(prepend) = prepend_in.as_ref() { search.push(prepend); search.push(":"); } if let Some(dirs) = env::var_os("PATH") { search.push(dirs); } for dir in env::split_paths(&search) {
let mut path = dir.to_owned(); path.push(name); if let Ok(path) = path.canonicalize() { if is_executable(&path
random_line_split
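// --- Illustrative sketch (not part of the original lib.rs) ---
// parse_duration() above buffers digits until it hits a unit character and
// adds number * unit-scale seconds each time; a trailing bare number counts
// as seconds. This is a std-only sketch of that loop with only a handful of
// units; the original also accepts 'n', 'u' and 'Y'.

fn parse_duration_sketch(input: &str) -> Result<f64, std::num::ParseFloatError> {
    fn unit(ch: char) -> Option<f64> {
        match ch {
            'm' => Some(0.001), // milliseconds
            's' => Some(1.0),
            'M' => Some(60.0),
            'H' => Some(3600.0),
            'D' => Some(3600.0 * 24.0),
            _ => None,
        }
    }
    let (mut num, mut sum) = (String::new(), 0.0);
    for ch in input.chars() {
        if let Some(scale) = unit(ch) {
            sum += num.trim().parse::<f64>()? * scale;
            num.clear();
        } else {
            num.push(ch);
        }
    }
    if !num.trim().is_empty() {
        sum += num.trim().parse::<f64>()?; // bare trailing number = seconds
    }
    Ok(sum)
}

fn main() {
    assert_eq!(parse_duration_sketch("1H30M").unwrap(), 5400.0);
    assert_eq!(parse_duration_sketch("2.5s").unwrap(), 2.5);
    assert_eq!(parse_duration_sketch("90").unwrap(), 90.0);
}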
lib.rs
M', 60.0), ('H', 3600.0), ('D', 3600.0 * 24.0), ('Y', 3600.0 * 24.0 * 365.0), ] .iter() .cloned() .collect(); } let mut num = String::new(); let mut sum = 0.0; for ch in input.chars() { if UNITS.contains_key(&ch) { sum += num.trim().parse::<f64>()? * UNITS[&ch]; num.clear(); } else { num.push(ch); } } if num.trim().len() > 0 { sum += num.trim().parse::<f64>()?; } Ok(sum) } fn is_executable<P: AsRef<Path>>(path_in: P) -> bool { let path = path_in.as_ref(); match path.metadata() { Ok(md) => md.is_file() && md.mode() & 0o111 != 0, Err(_) => false, } } pub fn exe_dir() -> Result<PathBuf> { let mut path = env::current_exe()?; path.pop(); Ok(path) } pub fn find_bin<N: AsRef<OsStr>, P: AsRef<OsStr>>( name_in: N, prepend_in: Option<P>, ) -> Option<PathBuf> { let name = name_in.as_ref(); let mut search = OsString::new(); if let Some(prepend) = prepend_in.as_ref() { search.push(prepend); search.push(":"); } if let Some(dirs) = env::var_os("PATH") { search.push(dirs); } for dir in env::split_paths(&search) { let mut path = dir.to_owned(); path.push(name); if let Ok(path) = path.canonicalize() { if is_executable(&path) { return Some(path); } } } None } pub fn chgrp<P: AsRef<Path>>(path_in: P, gid: u32) -> Result<bool> { let path = path_in.as_ref(); let md = fs::metadata(path)?; if md.st_gid() != gid { let cpath = CString::new(path.as_os_str().as_bytes())?; if unsafe { libc::chown(cpath.as_ptr(), md.st_uid(), gid) } < 0 { bail!("Failed to chgrp {:?} to {} ({:?})", path, gid, unsafe { *libc::__errno_location() }); } Ok(true) } else { Ok(false) } } pub fn set_sgid<P: AsRef<Path>>(path_in: P) -> Result<bool> { let path = path_in.as_ref(); let md = fs::metadata(path)?; let mut perm = md.permissions(); if perm.mode() & 0o2000 == 0 { perm.set_mode(perm.mode() | 0o2000); fs::set_permissions(path, perm)?; Ok(true) } else { Ok(false) } } pub fn read_one_line<P: AsRef<Path>>(path: P) -> Result<String> { let f = fs::OpenOptions::new().read(true).open(path)?; let r = BufReader::new(f); Ok(r.lines().next().ok_or(anyhow!("File empty"))??) } pub fn write_one_line<P: AsRef<Path>>(path: P, line: &str) -> Result<()> { let mut f = fs::OpenOptions::new().write(true).open(path)?; Ok(f.write_all(line.as_ref())?) 
} pub fn unix_now() -> u64 { UNIX_EPOCH.elapsed().unwrap().as_secs() } pub fn init_logging(verbosity: u32) { if std::env::var("RUST_LOG").is_ok() { env_logger::init(); } else { let sl_level = match verbosity { 0 | 1 => sl::LevelFilter::Info, 2 => sl::LevelFilter::Debug, _ => sl::LevelFilter::Trace, }; let mut lcfg = sl::ConfigBuilder::new(); lcfg.set_time_level(sl::LevelFilter::Off) .set_location_level(sl::LevelFilter::Off) .set_target_level(sl::LevelFilter::Off) .set_thread_level(sl::LevelFilter::Off); if !console::user_attended_stderr() || sl::TermLogger::init(sl_level, lcfg.build(), sl::TerminalMode::Stderr).is_err() { sl::SimpleLogger::init(sl_level, lcfg.build()).unwrap(); } } } pub fn child_reader_thread(name: String, stdout: process::ChildStdout, tx: Sender<String>) { let reader = BufReader::new(stdout); for line in reader.lines() { match line { Ok(line) => { if let Err(e) = tx.send(line) { info!("{}: Reader thread terminating ({:?})", &name, &e); break; } } Err(e) => { warn!("{}: Failed to read from journalctl ({:?})", &name, &e); break; } } } } pub fn run_command(cmd: &mut Command, emsg: &str) -> Result<()> { let cmd_str = format!("{:?}", &cmd); match cmd.status() { Ok(rc) if rc.success() => Ok(()), Ok(rc) => bail!("{:?} ({:?}): {}", &cmd_str, &rc, emsg,), Err(e) => bail!("{:?} ({:?}): {}", &cmd_str, &e, emsg,), } } pub fn fill_area_with_random<T, R: rand::Rng + ?Sized>(area: &mut [T], comp: f64, rng: &mut R) { let area = unsafe { std::slice::from_raw_parts_mut( std::mem::transmute::<*mut T, *mut u64>(area.as_mut_ptr()), area.len() * size_of::<T>() / size_of::<u64>(), ) }; const BLOCK_SIZE: usize = 512; const WORDS_PER_BLOCK: usize = BLOCK_SIZE / size_of::<u64>(); let rands_per_block = (((WORDS_PER_BLOCK as f64) * (1.0 - comp)) as usize).min(WORDS_PER_BLOCK); let last_first = area[0]; for i in 0..area.len() { area[i] = if i % WORDS_PER_BLOCK < rands_per_block { rng.gen() } else { 0 }; } // guarantee that the first word doesn't stay the same if area[0] == last_first { area[0] += 1; } } pub fn read_cgroup_flat_keyed_file(path: &str) -> Result<HashMap<String, u64>> { let f = fs::OpenOptions::new().read(true).open(path)?; let r = BufReader::new(f); let mut map = HashMap::new(); for line in r.lines().filter_map(Result::ok) { if let Ok((key, val)) = scan_fmt!(&line, "{} {d}", String, u64) { map.insert(key, val); } } Ok(map) } pub fn read_cgroup_nested_keyed_file( path: &str, ) -> Result<HashMap<String, HashMap<String, String>>> { let f = fs::OpenOptions::new().read(true).open(path)?; let r = BufReader::new(f); let mut top_map = HashMap::new(); for line in r.lines().filter_map(Result::ok) { let mut split = line.split_whitespace(); let top_key = split.next().unwrap(); let mut map = HashMap::new(); for tok in split { if let Ok((key, val)) = scan_fmt!(tok, "{}={}", String, String) { map.insert(key, val); } } top_map.insert(top_key.into(), map); } Ok(top_map) } struct GlobalProgState { exiting: bool, kick_seq: u64, } lazy_static::lazy_static! { static ref PROG_STATE: Mutex<GlobalProgState> = Mutex::new(GlobalProgState { exiting: false, kick_seq: 1 }); static ref PROG_WAITQ: Condvar = Condvar::new(); } thread_local! 
{ static LOCAL_KICK_SEQ: RefCell<u64> = RefCell::new(0); } pub fn setup_prog_state() { ctrlc::set_handler(move || { info!("SIGINT/TERM received, exiting..."); set_prog_exiting(); }) .expect("Error setting term handler"); } pub fn set_prog_exiting() { PROG_STATE.lock().unwrap().exiting = true; PROG_WAITQ.notify_all(); } pub fn prog_exiting() -> bool { PROG_STATE.lock().unwrap().exiting } pub fn prog_kick() { PROG_STATE.lock().unwrap().kick_seq += 1; PROG_WAITQ.notify_all(); } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum
ProgState
identifier_name
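// --- Illustrative sketch (not part of the original lib.rs) ---
// read_cgroup_flat_keyed_file() above parses cgroup2 "flat keyed" files
// (one `key value` pair per line, e.g. memory.stat) into a HashMap using the
// scan_fmt! macro. The same parse can be done with plain std calls; lines
// that do not fit the format are skipped, as in the original.

use std::collections::HashMap;

fn parse_flat_keyed(text: &str) -> HashMap<String, u64> {
    let mut map = HashMap::new();
    for line in text.lines() {
        let mut it = line.split_whitespace();
        if let (Some(key), Some(val)) = (it.next(), it.next()) {
            if let Ok(val) = val.parse::<u64>() {
                map.insert(key.to_string(), val);
            }
        }
    }
    map
}

fn main() {
    let sample = "anon 1056768\nfile 4096\nkernel_stack 0\n";
    let map = parse_flat_keyed(sample);
    assert_eq!(map["anon"], 1_056_768);
    assert_eq!(map.get("kernel_stack"), Some(&0));
}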
trie.rs
<u8>>) -> Self { let include_dense = K_INCLUDE_DENSE; let sparse_dense = K_SPARSE_DENSE_RATIO; let mut builder = builder::Builder::new(include_dense, sparse_dense); builder.build(&keys); let louds_dense = LoudsDense::new(&builder); let louds_sparse = LoudsSparse::new(&builder); let mut num_keys = 0; for level in 0..louds_sparse.get_height() { num_keys += builder.get_suffix_counts()[level]; } let mut suffix_builder: Vec<Suffix> = vec![ Suffix { contents: Vec::new(), }; num_keys ]; for i in 0..keys.len() { if i != 0 && keys[i] == keys[i - 1] { continue; } let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice()); assert!(key_id < num_keys); let contents = keys[i][level..].to_vec(); suffix_builder[key_id] = Suffix { contents }; } // suffix_builder.sort(); // let mut suffix_ptrs: Vec<usize> = vec![0; num_keys]; // let mut suffixes = vec![]; // let mut prev_suffix = Suffix { // contents: Vec::new(), // key_id: kNotFound, // }; // for i in 0..num_keys { // let curr_suffix = suffix_builder[num_keys - i - 1]; // if curr_suffix.contents.len() == 0 { // suffix_ptrs[curr_suffix.key_id] = 0; // continue; // } // let mut num_match = 0; // while num_match < curr_suffix.contents.len() // && num_match < prev_suffix.contents.len() // && prev_suffix.contents[num_match] == curr_suffix.contents[num_match] // { // num_match += 1; // } // if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 { // suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match) // } else { // suffix_ptrs[curr_suffix.key_id] = suffixes.len(); // suffixes.push(curr_suffix); // } // prev_suffix = curr_suffix; // } // let mut suf_bits = 0; // let mut max_ptr = suffixes.len(); // suf_bits += 1; // max_ptr >>= 1; // while max_ptr != 0 { // suf_bits += 1; // max_ptr >>= 1; // } // let suffix_ptrs = return Trie { louds_dense, louds_sparse, suffixes: suffix_builder, } } fn traverse( louds_dense: &LoudsDense, louds_sparse: &LoudsSparse, key: &key_t, ) -> (position_t, level_t) { let ret = louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_NOT_FOUND { return louds_sparse.find_key(key, ret.2); } return (ret.0, ret.1); } fn _traverse( &self, key: &key_t, ) -> (position_t, level_t) { let ret = self.louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_NOT_FOUND { return self.louds_sparse.find_key(key, ret.2); } return (ret.0, ret.1); } pub fn exact_search(&self, key: &key_t) -> position_t { let (key_id, level) = self._traverse(key); if key_id == K_NOT_FOUND { return K_NOT_FOUND } let suffix = &self.suffixes[key_id].contents; let length = key.len() - level; if length != suffix.len() { return K_NOT_FOUND } for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) { if cur_key != cur_suf { return K_NOT_FOUND } } return key_id } // // 見つかったかどうか,直前の探索のログを返したい. 
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t { // let diff_level = self.find_different_level(previous_key, key); // let (key_id, level) = // if diff_level < self.louds_sparse.get_start_level() { // let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level); // if ret.0 != K_NOT_FOUND { // (ret.0, ret.1) // } else if ret.2 != K_NOT_FOUND { // self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level) // } else { // (ret.0, ret.1) // } // } else { // self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level) // }; // } // fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t { // let mut diff_level = 0; // for (p, k) in pre_key.iter().zip(key) { // if p != k { // return diff_level // } else { // diff_level += 1; // } // } // return diff_level // } // time_range is depends on encoding specification pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool { let mut sequnce_count = 0; let th = TrajectoryHash::new(7, 20, 16); for key in keys.iter() { // let result = self.exact_search(&key); // let is_find = result != K_NOT_FOUND; let is_find = self.accurate_search(key, &th); if is_find { sequnce_count += 1; if sequnce_count >= time_range { return true } } else { sequnce_count = 0; } } return false } pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool { let neighbors = self.get_neighbors(key, th); for nei in neighbors { if self.exact_search(nei.as_slice()) != K_NOT_FOUND { return true } } false } pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> { let mut vec = Vec::with_capacity(EXTEND_NUMBER); let value: u128 = read_be_u128(key); // tiles to hash values for position in ACCURATE_GRID { let bytes = u128_to_bytes(th.calc(value, position), th.byte_length); vec.push(bytes); } vec } } pub struct TrajectoryHash { byte_length: usize, pub mask_lists: [Vec<u128>; 3], // ascend order } impl TrajectoryHash { pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self { let mut geo_lng_mask = 0b100u128; let mut geo_lat_mask = 0b010u128; let mut time_mask = 0b001u128; let diff = (geo_length as i32) - (time_length as i32); let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()]; if diff >= 0 { for _ in 0..time_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 3; mask_lists[2].push(time_mask); time_mask <<= 3; } geo_lng_mask >>= 3; geo_lng_mask <<= 2; geo_lat_mask >>= 3; geo_lat_mask <<= 2; for _ in 0..diff { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 2; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 2; } } else { for _ in 0..geo_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 3; mask
ec<Vec
identifier_name
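// --- Illustrative sketch (not part of the original trie.rs) ---
// doe_search() above walks the per-timestep keys of a trajectory, counts
// consecutive hits, and succeeds once `time_range` hits occur in a row.
// The counting logic is isolated here with the trie lookup stubbed out as a
// plain boolean sequence; consecutive_hits is an illustrative name only.

fn consecutive_hits(hits: impl IntoIterator<Item = bool>, time_range: usize) -> bool {
    let mut run = 0;
    for hit in hits {
        if hit {
            run += 1;
            if run >= time_range {
                return true;
            }
        } else {
            run = 0; // the run of matches is broken, start over
        }
    }
    false
}

fn main() {
    // true/false stand in for "this trajectory point was found in the trie"
    let hits = [true, true, false, true, true, true, false];
    assert!(consecutive_hits(hits, 3));
    assert!(!consecutive_hits([true, false, true], 2));
}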
trie.rs
sparse_dense = K_SPARSE_DENSE_RATIO; let mut builder = builder::Builder::new(include_dense, sparse_dense); builder.build(&keys); let louds_dense = LoudsDense::new(&builder); let louds_sparse = LoudsSparse::new(&builder); let mut num_keys = 0; for level in 0..louds_sparse.get_height() { num_keys += builder.get_suffix_counts()[level]; } let mut suffix_builder: Vec<Suffix> = vec![ Suffix { contents: Vec::new(), }; num_keys ]; for i in 0..keys.len() { if i != 0 && keys[i] == keys[i - 1] { continue; } let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice()); assert!(key_id < num_keys); let contents = keys[i][level..].to_vec(); suffix_builder[key_id] = Suffix { contents }; } // suffix_builder.sort(); // let mut suffix_ptrs: Vec<usize> = vec![0; num_keys]; // let mut suffixes = vec![]; // let mut prev_suffix = Suffix { // contents: Vec::new(), // key_id: kNotFound, // }; // for i in 0..num_keys { // let curr_suffix = suffix_builder[num_keys - i - 1]; // if curr_suffix.contents.len() == 0 { // suffix_ptrs[curr_suffix.key_id] = 0; // continue; // } // let mut num_match = 0; // while num_match < curr_suffix.contents.len() // && num_match < prev_suffix.contents.len() // && prev_suffix.contents[num_match] == curr_suffix.contents[num_match] // { // num_match += 1; // } // if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 { // suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match) // } else { // suffix_ptrs[curr_suffix.key_id] = suffixes.len(); // suffixes.push(curr_suffix); // } // prev_suffix = curr_suffix; // } // let mut suf_bits = 0; // let mut max_ptr = suffixes.len(); // suf_bits += 1; // max_ptr >>= 1; // while max_ptr != 0 { // suf_bits += 1; // max_ptr >>= 1; // } // let suffix_ptrs = return Trie { louds_dense, louds_sparse, suffixes: suffix_builder, } } fn traverse( louds_dense: &LoudsDense, louds_sparse: &LoudsSparse, key: &key_t, ) -> (position_t, level_t) { let ret = louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_N
d_key(key, ret.2); } return (ret.0, ret.1); } fn _traverse( &self, key: &key_t, ) -> (position_t, level_t) { let ret = self.louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_NOT_FOUND { return self.louds_sparse.find_key(key, ret.2); } return (ret.0, ret.1); } pub fn exact_search(&self, key: &key_t) -> position_t { let (key_id, level) = self._traverse(key); if key_id == K_NOT_FOUND { return K_NOT_FOUND } let suffix = &self.suffixes[key_id].contents; let length = key.len() - level; if length != suffix.len() { return K_NOT_FOUND } for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) { if cur_key != cur_suf { return K_NOT_FOUND } } return key_id } // // 見つかったかどうか,直前の探索のログを返したい. // fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t { // let diff_level = self.find_different_level(previous_key, key); // let (key_id, level) = // if diff_level < self.louds_sparse.get_start_level() { // let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level); // if ret.0 != K_NOT_FOUND { // (ret.0, ret.1) // } else if ret.2 != K_NOT_FOUND { // self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level) // } else { // (ret.0, ret.1) // } // } else { // self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level) // }; // } // fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t { // let mut diff_level = 0; // for (p, k) in pre_key.iter().zip(key) { // if p != k { // return diff_level // } else { // diff_level += 1; // } // } // return diff_level // } // time_range is depends on encoding specification pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool { let mut sequnce_count = 0; let th = TrajectoryHash::new(7, 20, 16); for key in keys.iter() { // let result = self.exact_search(&key); // let is_find = result != K_NOT_FOUND; let is_find = self.accurate_search(key, &th); if is_find { sequnce_count += 1; if sequnce_count >= time_range { return true } } else { sequnce_count = 0; } } return false } pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool { let neighbors = self.get_neighbors(key, th); for nei in neighbors { if self.exact_search(nei.as_slice()) != K_NOT_FOUND { return true } } false } pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> { let mut vec = Vec::with_capacity(EXTEND_NUMBER); let value: u128 = read_be_u128(key); // tiles to hash values for position in ACCURATE_GRID { let bytes = u128_to_bytes(th.calc(value, position), th.byte_length); vec.push(bytes); } vec } } pub struct TrajectoryHash { byte_length: usize, pub mask_lists: [Vec<u128>; 3], // ascend order } impl TrajectoryHash { pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self { let mut geo_lng_mask = 0b100u128; let mut geo_lat_mask = 0b010u128; let mut time_mask = 0b001u128; let diff = (geo_length as i32) - (time_length as i32); let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()]; if diff >= 0 { for _ in 0..time_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 3; mask_lists[2].push(time_mask); time_mask <<= 3; } geo_lng_mask >>= 3; geo_lng_mask <<= 2; geo_lat_mask >>= 3; geo_lat_mask <<= 2; for _ in 0..diff { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 2; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 2; } } else { for _ in 0..geo_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask 
<<= 3; mask_lists[2].push(time_mask); time_mask <<= 3; }
OT_FOUND { return louds_sparse.fin
conditional_block
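// --- Illustrative sketch (not part of the original trie.rs) ---
// TrajectoryHash above interleaves longitude/latitude/time bits into one
// integer and keeps, per dimension, that dimension's mask bits in ascending
// significance; calc() then moves +/-1 along one dimension by rippling a
// carry or borrow through those masks only. This sketch shows the same trick
// on a 2-D Morton code (x in even bits, y in odd bits) instead of the crate's
// 3-D geo/geo/time layout; all names here are illustrative.

fn dim_masks(dim: u32, bits: u32) -> Vec<u64> {
    (0..bits).map(|i| 1u64 << (2 * i + dim)).collect() // ascending order
}

fn step(value: u64, masks: &[u64], up: bool) -> u64 {
    let mut v = value;
    for &m in masks {
        if up == (value & m == 0) {
            v = if up { v | m } else { v & !m }; // bit absorbs the +/-1: done
            break;
        } else {
            v = if up { v & !m } else { v | m }; // carry/borrow ripples upward
        }
    }
    v
}

fn interleave(x: u64, y: u64, bits: u32) -> u64 {
    (0..bits).fold(0u64, |acc, i| {
        acc | (((x >> i) & 1) << (2 * i)) | (((y >> i) & 1) << (2 * i + 1))
    })
}

fn main() {
    let (xm, ym) = (dim_masks(0, 8), dim_masks(1, 8));
    let code = interleave(5, 9, 8);
    assert_eq!(step(code, &xm, true), interleave(6, 9, 8));  // x: 5 -> 6
    assert_eq!(step(code, &ym, false), interleave(5, 8, 8)); // y: 9 -> 8
}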
trie.rs
0; // continue; // } // let mut num_match = 0; // while num_match < curr_suffix.contents.len() // && num_match < prev_suffix.contents.len() // && prev_suffix.contents[num_match] == curr_suffix.contents[num_match] // { // num_match += 1; // } // if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 { // suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match) // } else { // suffix_ptrs[curr_suffix.key_id] = suffixes.len(); // suffixes.push(curr_suffix); // } // prev_suffix = curr_suffix; // } // let mut suf_bits = 0; // let mut max_ptr = suffixes.len(); // suf_bits += 1; // max_ptr >>= 1; // while max_ptr != 0 { // suf_bits += 1; // max_ptr >>= 1; // } // let suffix_ptrs = return Trie { louds_dense, louds_sparse, suffixes: suffix_builder, } } fn traverse( louds_dense: &LoudsDense, louds_sparse: &LoudsSparse, key: &key_t, ) -> (position_t, level_t) { let ret = louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_NOT_FOUND { return louds_sparse.find_key(key, ret.2); } return (ret.0, ret.1); } fn _traverse( &self, key: &key_t, ) -> (position_t, level_t) { let ret = self.louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_NOT_FOUND { return self.louds_sparse.find_key(key, ret.2); } return (ret.0, ret.1); } pub fn exact_search(&self, key: &key_t) -> position_t { let (key_id, level) = self._traverse(key); if key_id == K_NOT_FOUND { return K_NOT_FOUND } let suffix = &self.suffixes[key_id].contents; let length = key.len() - level; if length != suffix.len() { return K_NOT_FOUND } for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) { if cur_key != cur_suf { return K_NOT_FOUND } } return key_id } // // 見つかったかどうか,直前の探索のログを返したい. 
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t { // let diff_level = self.find_different_level(previous_key, key); // let (key_id, level) = // if diff_level < self.louds_sparse.get_start_level() { // let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level); // if ret.0 != K_NOT_FOUND { // (ret.0, ret.1) // } else if ret.2 != K_NOT_FOUND { // self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level) // } else { // (ret.0, ret.1) // } // } else { // self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level) // }; // } // fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t { // let mut diff_level = 0; // for (p, k) in pre_key.iter().zip(key) { // if p != k { // return diff_level // } else { // diff_level += 1; // } // } // return diff_level // } // time_range is depends on encoding specification pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool { let mut sequnce_count = 0; let th = TrajectoryHash::new(7, 20, 16); for key in keys.iter() { // let result = self.exact_search(&key); // let is_find = result != K_NOT_FOUND; let is_find = self.accurate_search(key, &th); if is_find { sequnce_count += 1; if sequnce_count >= time_range { return true } } else { sequnce_count = 0; } } return false } pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool { let neighbors = self.get_neighbors(key, th); for nei in neighbors { if self.exact_search(nei.as_slice()) != K_NOT_FOUND { return true } } false } pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> { let mut vec = Vec::with_capacity(EXTEND_NUMBER); let value: u128 = read_be_u128(key); // tiles to hash values for position in ACCURATE_GRID { let bytes = u128_to_bytes(th.calc(value, position), th.byte_length); vec.push(bytes); } vec } } pub struct TrajectoryHash { byte_length: usize, pub mask_lists: [Vec<u128>; 3], // ascend order } impl TrajectoryHash { pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self { let mut geo_lng_mask = 0b100u128; let mut geo_lat_mask = 0b010u128; let mut time_mask = 0b001u128; let diff = (geo_length as i32) - (time_length as i32); let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()]; if diff >= 0 { for _ in 0..time_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 3; mask_lists[2].push(time_mask); time_mask <<= 3; } geo_lng_mask >>= 3; geo_lng_mask <<= 2; geo_lat_mask >>= 3; geo_lat_mask <<= 2; for _ in 0..diff { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 2; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 2; } } else { for _ in 0..geo_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 3; mask_lists[2].push(time_mask); time_mask <<= 3; } for _ in 0..(-diff) { mask_lists[2].push(time_mask); time_mask <<= 1; } } TrajectoryHash { byte_length, mask_lists } } pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 { let mut updated = value; for (dimension, direction) in pos.iter().enumerate() { match direction { -1 => { for mask in self.mask_lists[dimension].iter() { if value & mask != 0 { updated &= !mask; break; } else { updated |= mask; } } }, 0 => {}, 1 => { for mask in self.mask_lists[dimension].iter() { if value & mask == 0 { updated |= mask; break; } else { updated &= !mask; } } }, _ => panic!("invalid value of direction!") } } updated } } fn read_be_u128(input: &[u8]) -> u128 { let mut output = 0u128; let 
digit = input.len() - 1;
    for (i, byte) in input.iter().enumerate() {
        output |= (*byte as u128) << 8*(digit - i);
    }
    output
}

fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
    value.to_be_bytes()[16-byte_length..].to_vec()
}
identifier_body
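A minimal round-trip check for the two byte helpers above; the 7-byte length matches the TrajectoryHash::new(7, 20, 16) call in doe_search, while the sample value in main is made up. The two helpers are copied from the code above so the sketch compiles on its own.

fn read_be_u128(input: &[u8]) -> u128 {
    // big-endian fold: the first byte lands in the highest position
    let mut output = 0u128;
    let digit = input.len() - 1;
    for (i, byte) in input.iter().enumerate() {
        output |= (*byte as u128) << 8*(digit - i);
    }
    output
}

fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
    // keep only the low `byte_length` bytes of the big-endian encoding
    value.to_be_bytes()[16 - byte_length..].to_vec()
}

fn main() {
    let value: u128 = 0x01_23_45_67_89_ab_cd; // any value that fits in 7 bytes
    let bytes = u128_to_bytes(value, 7);
    assert_eq!(bytes.len(), 7);
    assert_eq!(read_be_u128(&bytes), value);
}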
trie.rs
!= 0 && keys[i] == keys[i - 1] { continue; } let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice()); assert!(key_id < num_keys); let contents = keys[i][level..].to_vec(); suffix_builder[key_id] = Suffix { contents }; } // suffix_builder.sort(); // let mut suffix_ptrs: Vec<usize> = vec![0; num_keys]; // let mut suffixes = vec![]; // let mut prev_suffix = Suffix { // contents: Vec::new(), // key_id: kNotFound, // }; // for i in 0..num_keys { // let curr_suffix = suffix_builder[num_keys - i - 1]; // if curr_suffix.contents.len() == 0 { // suffix_ptrs[curr_suffix.key_id] = 0; // continue; // } // let mut num_match = 0; // while num_match < curr_suffix.contents.len() // && num_match < prev_suffix.contents.len() // && prev_suffix.contents[num_match] == curr_suffix.contents[num_match] // { // num_match += 1; // } // if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 { // suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match) // } else { // suffix_ptrs[curr_suffix.key_id] = suffixes.len(); // suffixes.push(curr_suffix); // } // prev_suffix = curr_suffix; // } // let mut suf_bits = 0; // let mut max_ptr = suffixes.len(); // suf_bits += 1; // max_ptr >>= 1; // while max_ptr != 0 { // suf_bits += 1; // max_ptr >>= 1; // } // let suffix_ptrs = return Trie { louds_dense, louds_sparse, suffixes: suffix_builder, } } fn traverse( louds_dense: &LoudsDense, louds_sparse: &LoudsSparse, key: &key_t, ) -> (position_t, level_t) { let ret = louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_NOT_FOUND { return louds_sparse.find_key(key, ret.2); } return (ret.0, ret.1); } fn _traverse( &self, key: &key_t, ) -> (position_t, level_t) { let ret = self.louds_dense.find_key(key); if ret.0 != K_NOT_FOUND { return (ret.0, ret.1); } if ret.2 != K_NOT_FOUND { return self.louds_sparse.find_key(key, ret.2); } return (ret.0, ret.1); } pub fn exact_search(&self, key: &key_t) -> position_t { let (key_id, level) = self._traverse(key); if key_id == K_NOT_FOUND { return K_NOT_FOUND } let suffix = &self.suffixes[key_id].contents; let length = key.len() - level; if length != suffix.len() { return K_NOT_FOUND } for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) { if cur_key != cur_suf { return K_NOT_FOUND } } return key_id } // // 見つかったかどうか,直前の探索のログを返したい. 
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t { // let diff_level = self.find_different_level(previous_key, key); // let (key_id, level) = // if diff_level < self.louds_sparse.get_start_level() { // let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level); // if ret.0 != K_NOT_FOUND { // (ret.0, ret.1) // } else if ret.2 != K_NOT_FOUND { // self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level) // } else { // (ret.0, ret.1) // } // } else { // self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level) // }; // } // fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t { // let mut diff_level = 0; // for (p, k) in pre_key.iter().zip(key) { // if p != k { // return diff_level // } else { // diff_level += 1; // } // } // return diff_level // } // time_range is depends on encoding specification pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool { let mut sequnce_count = 0; let th = TrajectoryHash::new(7, 20, 16); for key in keys.iter() { // let result = self.exact_search(&key); // let is_find = result != K_NOT_FOUND; let is_find = self.accurate_search(key, &th); if is_find { sequnce_count += 1; if sequnce_count >= time_range { return true } } else { sequnce_count = 0; } } return false } pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool { let neighbors = self.get_neighbors(key, th); for nei in neighbors { if self.exact_search(nei.as_slice()) != K_NOT_FOUND { return true } } false } pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> { let mut vec = Vec::with_capacity(EXTEND_NUMBER); let value: u128 = read_be_u128(key); // tiles to hash values for position in ACCURATE_GRID { let bytes = u128_to_bytes(th.calc(value, position), th.byte_length); vec.push(bytes); } vec } } pub struct TrajectoryHash { byte_length: usize, pub mask_lists: [Vec<u128>; 3], // ascend order } impl TrajectoryHash { pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self { let mut geo_lng_mask = 0b100u128; let mut geo_lat_mask = 0b010u128; let mut time_mask = 0b001u128; let diff = (geo_length as i32) - (time_length as i32); let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()]; if diff >= 0 { for _ in 0..time_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 3; mask_lists[2].push(time_mask); time_mask <<= 3; } geo_lng_mask >>= 3; geo_lng_mask <<= 2; geo_lat_mask >>= 3; geo_lat_mask <<= 2; for _ in 0..diff { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 2; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 2; } } else { for _ in 0..geo_length { mask_lists[0].push(geo_lng_mask); geo_lng_mask <<= 3; mask_lists[1].push(geo_lat_mask); geo_lat_mask <<= 3; mask_lists[2].push(time_mask); time_mask <<= 3; } for _ in 0..(-diff) { mask_lists[2].push(time_mask); time_mask <<= 1; } } TrajectoryHash { byte_length, mask_lists } } pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 { let mut updated = value; for (dimension, direction) in pos.iter().enumerate() { match direction { -1 => { for mask in self.mask_lists[dimension].iter() { if value & mask != 0 { updated &= !mask;
break; } else { updated |= mask; }
random_line_split
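The per-dimension walk inside TrajectoryHash::calc is a ripple carry over that dimension's interleaved bit positions. Below is a small self-contained sketch of the direction == 1 case; the two-element mask list is a made-up example, whereas the real mask_lists are built in ascending order by TrajectoryHash::new.

// Increment the sub-value encoded at the bit positions given by `masks`
// (ascending order), the same walk calc() performs for direction == 1.
fn increment_along(value: u128, masks: &[u128]) -> u128 {
    let mut updated = value;
    for mask in masks {
        if value & mask == 0 {
            updated |= mask; // free bit: set it and stop
            break;
        } else {
            updated &= !mask; // carry: clear this bit and move to the next one
        }
    }
    updated
}

fn main() {
    // hypothetical dimension occupying bits 0 and 3 of the interleaved code
    let masks = [0b0001u128, 0b1000u128];
    assert_eq!(increment_along(0b0000, &masks), 0b0001); // 0 -> 1
    assert_eq!(increment_along(0b0001, &masks), 0b1000); // 1 -> 2 (carry)
}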
tau0305.py
].flatten()) #update chunk id step_j += n_j step_i += n_i del X_global, Y_global, out_global for metric in metrics: distances = [] mean_distances = [] max_dis = [] #Split cpu's data to gpus n = int(len(features) / len(gpus)) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] threads = [] from split import split_double #Compute chunks in multi-gpus for p, gpu in enumerate(gpus): whole_distances = [] split = split_double(args, mutilple_features[p]) n = int(len(mutilple_features[p]) / split) chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )] step_i = 0 threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),])) #Number of threads depend on how many gpus you have for t in threads: t.setDaemon(True) t.start() #Re-group final distance data from gpus for t in threads: whole_distances = [] t.join() distances.extend(np.array(whole_distances).flatten()) #Process data random_distances = np.array(distances).flatten() random_distances = random_distances.reshape((random_distances.shape[0], 1)).T mean_distances = np.mean(mean_distances) print("mean_distances: ",mean_distances) print("max dis:", max(max_dis))#original max dis before EVT ################################################################### ######################################################################################## print("Finding Nearest Points......") #Find nearest points on GPUs from gpu_functions import gpu_nearest nearest_cluster = np.zeros((len(features)), dtype = 'int') nearest_points_dis = np.zeros((len(features))) n = int(len(features) / len(gpus)) features = list(features) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_features) > len(gpus): mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)]) del mutilple_features[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_features[0:len(gpus)]): ind.append(range(step, len(j)+step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis])) thread(threads) del mutilple_features # In round 1 the centroids is the points no matter what's linkage nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))] nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first nearest_points = nearest_cluster ######################################################################################## print("Computing the appearance of nearest_points") threadsperblock = 32 blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock) X_global = cuda.to_device(nearest_points) out_global = cuda.device_array((nearest_points.shape[0])) from cuda_kernels import count_appear count_appear[blockspergrid, threadsperblock](X_global, out_global) appear = np.array(out_global.copy_to_host(), dtype = int) appear_count = [[j, i] for i, j in enumerate(appear)] # count the appearance of each kernel points # generate order order = [i[1] for i in sorted(appear_count, reverse=True)] # add non kernel points to order processed = set() init = [] for count, i in enumerate(order): j = nearest_points[i] if i not in processed and j not in 
processed: init.append([i, j]) processed.add(i) processed.add(j) init = init[0: int(len(init))] N = len(init) init_length = N init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups. ###################################################################################################### print("Finding Nearest Intial Pairs") #Computing nearest centroids on GPUs centroids = [np.mean(i,axis=0) for i in init_features] X = centroids from gpu_functions import gpu_nearest_init_centroids gs = np.zeros((len(init_features))) nearest_init = np.zeros((len(init_features)), dtype = 'int') n = int(len(centroids) / len(gpus)) mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_centroids) > len(gpus): mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)]) del mutilple_centroids[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_centroids[0:len(gpus)]): ind.append(range(step, len(j) + step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init])) thread(threads) del mutilple_centroids ########################################################################################################## #Nearest initial pairs combo nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)] ########################################################################################################## gxs = [] print("Computing Gaps") # Computing gaps on GPUs from gpu_functions import gpu_distance for pair1, pair2 in nearest_init_combo: round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair2[0], pair2[1]]]) features0 = [features[k] for k in pair1] #extract features of cluster0 features1 = [features[k] for k in pair2] #extract features of cluster1 centroid0 = np.mean(features0, axis=0) # Get controid of initial pair0 centroid1 = np.mean(features1, axis=0) # Get controid of initial pair1 if metric == "cosine": gx = scipy.spatial.distance.cosine(centroid0, centroid1) elif metric == "euclidean": gx = scipy.spatial.distance.euclidean(centroid0, centroid1) gxs.append(gx) #gaps #Our tau number_of_clusters = 30 thresh = 0.01 tw = weibull.weibull() data = torch.Tensor(gxs) fullrange = torch.linspace(0, 1, 100) tailj = torch.linspace(.45, .55, 10) torch.Tensor.ndim = property(lambda self: len(self.shape)) tw.FitHigh(data.view(1, -1), int(1. * len(data))) parms = tw.return_all_parameters() print(parms) pcent = 1 - 1 / len(data) pcent = 0.99 print("EVT Tau for ", pcent * 100, " Percentile at ", parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor']) # wscoresj = tw.wscore(tailj) # print("Ijbb Wscores=",tailj,wscoresj) wscoresj = tw.wscore(fullrange) tau_T = parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor'] tau_T = tau_T.numpy()[0][0] return 0, T, tau, nearest_points, metric, init_length, nearest_cluster_with_distance_round_1, nearest_points_dis, gx, 0 def nan_to_num(t,mynan=0.): if torch.all(torch.isfinite(t)): return t if len(t.size()) == 0:
return torch.tensor(mynan)
    return torch.cat([nan_to_num(l).unsqueeze(0) for l in t],0)

def get_tau(data,maxval,tailfrac=.25,pcent=.999):
    #tw = weibull.weibull(translateAmountTensor=.001)
random_line_split
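The threshold printed near the end of tau() inverts the fitted Weibull CDF at a chosen percentile. A small sketch of that formula follows, written in Rust for consistency with the other snippets; scale, shape and small_score stand in for parms['Scale'], parms['Shape'] and parms['smallScoreTensor'] from the Python code, and the numbers in main are purely illustrative.

// tau = Scale * ln(1 / (1 - pcent))^(1 / Shape) - 1 + smallScoreTensor
fn weibull_tau(scale: f64, shape: f64, small_score: f64, pcent: f64) -> f64 {
    scale * (1.0 / (1.0 - pcent)).ln().powf(1.0 / shape) - 1.0 + small_score
}

fn main() {
    // made-up parameters, only to show the call shape
    let tau = weibull_tau(0.5, 2.0, 1.0, 0.99);
    assert!(tau > 0.0);
}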
tau0305.py
(X, Y, out): i, j = cuda.grid(2) if i < out.shape[0] and j < out.shape[1]: u = X[i] v = Y[j] out[i, j] = euclidean_gpu(u, v) ##################################################################################### def tau(args, features, gpus): #Now only support Cosine and Euclidean on GPU if args.distance_metric: metrics = [args.distance_metric] else: metrics =['cosine','euclidean'] print("The Distance Metric is: ", metrics) #CUDA parallel distance computing, support multi-gpus def gpu_pairwise_distance(chunks, step_i, gpu): #lock.acquire()#no need to lock threads in this case cuda.select_device(gpu) for i, chunk1 in enumerate(chunks): print("Computing distance chunk: ", i + 1) #Define chunk id x axis step_j = 0 n_i = chunk1.shape[0] for j, chunk2 in enumerate(chunks): #Copy data to gpu X_global = cuda.to_device(chunk1) Y_global = cuda.to_device(chunk2) #Define chunk id y axis n_j = chunk2.shape[0] out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0])) # Define gpu's grid threadsperblock = (16, 16) blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0])) blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1])) blockspergrid = (blockspergrid_x, blockspergrid_y) #Compute distance on gpu if metric == "cosine": cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global) elif metric == "euclidean": euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global) #Find mean and max for each loop mean_distances.append(np.mean(out_global.copy_to_host())) max_dis.append(np.max(out_global.copy_to_host())) #Select 2% points to EVT randomly k = int(len(out_global.copy_to_host()) * 0.02) number_of_rows = out_global.copy_to_host().shape[0] random_indices = np.random.choice(number_of_rows, size=k, replace=False) #Copy gpu distance data to cpu numpy if len(out_global.copy_to_host()[random_indices, :]) > 0: whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten()) #update chunk id step_j += n_j step_i += n_i del X_global, Y_global, out_global for metric in metrics: distances = [] mean_distances = [] max_dis = [] #Split cpu's data to gpus n = int(len(features) / len(gpus)) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] threads = [] from split import split_double #Compute chunks in multi-gpus for p, gpu in enumerate(gpus): whole_distances = [] split = split_double(args, mutilple_features[p]) n = int(len(mutilple_features[p]) / split) chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )] step_i = 0 threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),])) #Number of threads depend on how many gpus you have for t in threads: t.setDaemon(True) t.start() #Re-group final distance data from gpus for t in threads: whole_distances = [] t.join() distances.extend(np.array(whole_distances).flatten()) #Process data random_distances = np.array(distances).flatten() random_distances = random_distances.reshape((random_distances.shape[0], 1)).T mean_distances = np.mean(mean_distances) print("mean_distances: ",mean_distances) print("max dis:", max(max_dis))#original max dis before EVT ################################################################### ######################################################################################## print("Finding Nearest Points......") #Find nearest points on GPUs from gpu_functions import gpu_nearest nearest_cluster = np.zeros((len(features)), 
dtype = 'int') nearest_points_dis = np.zeros((len(features))) n = int(len(features) / len(gpus)) features = list(features) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_features) > len(gpus): mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)]) del mutilple_features[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_features[0:len(gpus)]): ind.append(range(step, len(j)+step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis])) thread(threads) del mutilple_features # In round 1 the centroids is the points no matter what's linkage nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))] nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first nearest_points = nearest_cluster ######################################################################################## print("Computing the appearance of nearest_points") threadsperblock = 32 blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock) X_global = cuda.to_device(nearest_points) out_global = cuda.device_array((nearest_points.shape[0])) from cuda_kernels import count_appear count_appear[blockspergrid, threadsperblock](X_global, out_global) appear = np.array(out_global.copy_to_host(), dtype = int) appear_count = [[j, i] for i, j in enumerate(appear)] # count the appearance of each kernel points # generate order order = [i[1] for i in sorted(appear_count, reverse=True)] # add non kernel points to order processed = set() init = [] for count, i in enumerate(order): j = nearest_points[i] if i not in processed and j not in processed: init.append([i, j]) processed.add(i) processed.add(j) init = init[0: int(len(init))] N = len(init) init_length = N init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups. 
###################################################################################################### print("Finding Nearest Intial Pairs") #Computing nearest centroids on GPUs centroids = [np.mean(i,axis=0) for i in init_features] X = centroids from gpu_functions import gpu_nearest_init_centroids gs = np.zeros((len(init_features))) nearest_init = np.zeros((len(init_features)), dtype = 'int') n = int(len(centroids) / len(gpus)) mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_centroids) > len(gpus): mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)]) del mutilple_centroids[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_centroids[0:len(gpus)]): ind.append(range(step, len(j) + step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init])) thread(threads) del mutilple_centroids ########################################################################################################## #Nearest initial pairs combo nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)] ########################################################################################################## gxs = [] print("Computing Gaps") # Computing gaps on GPUs from gpu_functions import gpu_distance for pair1, pair2 in nearest_init_combo: round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair
euclidean_dis_gpu
identifier_name
tau0305.py
@cuda.jit def euclidean_dis_gpu(X, Y, out): i, j = cuda.grid(2) if i < out.shape[0] and j < out.shape[1]: u = X[i] v = Y[j] out[i, j] = euclidean_gpu(u, v) ##################################################################################### def tau(args, features, gpus): #Now only support Cosine and Euclidean on GPU if args.distance_metric: metrics = [args.distance_metric] else: metrics =['cosine','euclidean'] print("The Distance Metric is: ", metrics) #CUDA parallel distance computing, support multi-gpus def gpu_pairwise_distance(chunks, step_i, gpu): #lock.acquire()#no need to lock threads in this case cuda.select_device(gpu) for i, chunk1 in enumerate(chunks): print("Computing distance chunk: ", i + 1) #Define chunk id x axis step_j = 0 n_i = chunk1.shape[0] for j, chunk2 in enumerate(chunks): #Copy data to gpu X_global = cuda.to_device(chunk1) Y_global = cuda.to_device(chunk2) #Define chunk id y axis n_j = chunk2.shape[0] out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0])) # Define gpu's grid threadsperblock = (16, 16) blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0])) blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1])) blockspergrid = (blockspergrid_x, blockspergrid_y) #Compute distance on gpu if metric == "cosine": cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global) elif metric == "euclidean": euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global) #Find mean and max for each loop mean_distances.append(np.mean(out_global.copy_to_host())) max_dis.append(np.max(out_global.copy_to_host())) #Select 2% points to EVT randomly k = int(len(out_global.copy_to_host()) * 0.02) number_of_rows = out_global.copy_to_host().shape[0] random_indices = np.random.choice(number_of_rows, size=k, replace=False) #Copy gpu distance data to cpu numpy if len(out_global.copy_to_host()[random_indices, :]) > 0: whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten()) #update chunk id step_j += n_j step_i += n_i del X_global, Y_global, out_global for metric in metrics: distances = [] mean_distances = [] max_dis = [] #Split cpu's data to gpus n = int(len(features) / len(gpus)) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] threads = [] from split import split_double #Compute chunks in multi-gpus for p, gpu in enumerate(gpus): whole_distances = [] split = split_double(args, mutilple_features[p]) n = int(len(mutilple_features[p]) / split) chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )] step_i = 0 threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),])) #Number of threads depend on how many gpus you have for t in threads: t.setDaemon(True) t.start() #Re-group final distance data from gpus for t in threads: whole_distances = [] t.join() distances.extend(np.array(whole_distances).flatten()) #Process data random_distances = np.array(distances).flatten() random_distances = random_distances.reshape((random_distances.shape[0], 1)).T mean_distances = np.mean(mean_distances) print("mean_distances: ",mean_distances) print("max dis:", max(max_dis))#original max dis before EVT ################################################################### ######################################################################################## print("Finding Nearest Points......") #Find nearest points on GPUs from gpu_functions import gpu_nearest 
nearest_cluster = np.zeros((len(features)), dtype = 'int') nearest_points_dis = np.zeros((len(features))) n = int(len(features) / len(gpus)) features = list(features) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_features) > len(gpus): mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)]) del mutilple_features[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_features[0:len(gpus)]): ind.append(range(step, len(j)+step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis])) thread(threads) del mutilple_features # In round 1 the centroids is the points no matter what's linkage nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))] nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first nearest_points = nearest_cluster ######################################################################################## print("Computing the appearance of nearest_points") threadsperblock = 32 blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock) X_global = cuda.to_device(nearest_points) out_global = cuda.device_array((nearest_points.shape[0])) from cuda_kernels import count_appear count_appear[blockspergrid, threadsperblock](X_global, out_global) appear = np.array(out_global.copy_to_host(), dtype = int) appear_count = [[j, i] for i, j in enumerate(appear)] # count the appearance of each kernel points # generate order order = [i[1] for i in sorted(appear_count, reverse=True)] # add non kernel points to order processed = set() init = [] for count, i in enumerate(order): j = nearest_points[i] if i not in processed and j not in processed: init.append([i, j]) processed.add(i) processed.add(j) init = init[0: int(len(init))] N = len(init) init_length = N init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups. 
###################################################################################################### print("Finding Nearest Intial Pairs") #Computing nearest centroids on GPUs centroids = [np.mean(i,axis=0) for i in init_features] X = centroids from gpu_functions import gpu_nearest_init_centroids gs = np.zeros((len(init_features))) nearest_init = np.zeros((len(init_features)), dtype = 'int') n = int(len(centroids) / len(gpus)) mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_centroids) > len(gpus): mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)]) del mutilple_centroids[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_centroids[0:len(gpus)]): ind.append(range(step, len(j) + step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init])) thread(threads) del mutilple_centroids ########################################################################################################## #Nearest initial pairs combo nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)] ########################################################################################################## gxs = [] print
i, j = cuda.grid(2)
    if i < out.shape[0] and j < out.shape[1]:
        u = X[i]
        v = Y[j]
        out[i, j] = cosine_gpu(u, v)
identifier_body
tau0305.py
, j = cuda.grid(2) if i < out.shape[0] and j < out.shape[1]: u = X[i] v = Y[j] out[i, j] = euclidean_gpu(u, v) ##################################################################################### def tau(args, features, gpus): #Now only support Cosine and Euclidean on GPU if args.distance_metric: metrics = [args.distance_metric] else: metrics =['cosine','euclidean'] print("The Distance Metric is: ", metrics) #CUDA parallel distance computing, support multi-gpus def gpu_pairwise_distance(chunks, step_i, gpu): #lock.acquire()#no need to lock threads in this case cuda.select_device(gpu) for i, chunk1 in enumerate(chunks): print("Computing distance chunk: ", i + 1) #Define chunk id x axis step_j = 0 n_i = chunk1.shape[0] for j, chunk2 in enumerate(chunks): #Copy data to gpu X_global = cuda.to_device(chunk1) Y_global = cuda.to_device(chunk2) #Define chunk id y axis n_j = chunk2.shape[0] out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0])) # Define gpu's grid threadsperblock = (16, 16) blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0])) blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1])) blockspergrid = (blockspergrid_x, blockspergrid_y) #Compute distance on gpu if metric == "cosine": cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global) elif metric == "euclidean": euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global) #Find mean and max for each loop mean_distances.append(np.mean(out_global.copy_to_host())) max_dis.append(np.max(out_global.copy_to_host())) #Select 2% points to EVT randomly k = int(len(out_global.copy_to_host()) * 0.02) number_of_rows = out_global.copy_to_host().shape[0] random_indices = np.random.choice(number_of_rows, size=k, replace=False) #Copy gpu distance data to cpu numpy if len(out_global.copy_to_host()[random_indices, :]) > 0: whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten()) #update chunk id step_j += n_j step_i += n_i del X_global, Y_global, out_global for metric in metrics: distances = [] mean_distances = [] max_dis = [] #Split cpu's data to gpus n = int(len(features) / len(gpus)) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] threads = [] from split import split_double #Compute chunks in multi-gpus for p, gpu in enumerate(gpus):
#Number of threads depend on how many gpus you have for t in threads: t.setDaemon(True) t.start() #Re-group final distance data from gpus for t in threads: whole_distances = [] t.join() distances.extend(np.array(whole_distances).flatten()) #Process data random_distances = np.array(distances).flatten() random_distances = random_distances.reshape((random_distances.shape[0], 1)).T mean_distances = np.mean(mean_distances) print("mean_distances: ",mean_distances) print("max dis:", max(max_dis))#original max dis before EVT ################################################################### ######################################################################################## print("Finding Nearest Points......") #Find nearest points on GPUs from gpu_functions import gpu_nearest nearest_cluster = np.zeros((len(features)), dtype = 'int') nearest_points_dis = np.zeros((len(features))) n = int(len(features) / len(gpus)) features = list(features) mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_features) > len(gpus): mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)]) del mutilple_features[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_features[0:len(gpus)]): ind.append(range(step, len(j)+step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis])) thread(threads) del mutilple_features # In round 1 the centroids is the points no matter what's linkage nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))] nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first nearest_points = nearest_cluster ######################################################################################## print("Computing the appearance of nearest_points") threadsperblock = 32 blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock) X_global = cuda.to_device(nearest_points) out_global = cuda.device_array((nearest_points.shape[0])) from cuda_kernels import count_appear count_appear[blockspergrid, threadsperblock](X_global, out_global) appear = np.array(out_global.copy_to_host(), dtype = int) appear_count = [[j, i] for i, j in enumerate(appear)] # count the appearance of each kernel points # generate order order = [i[1] for i in sorted(appear_count, reverse=True)] # add non kernel points to order processed = set() init = [] for count, i in enumerate(order): j = nearest_points[i] if i not in processed and j not in processed: init.append([i, j]) processed.add(i) processed.add(j) init = init[0: int(len(init))] N = len(init) init_length = N init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups. 
###################################################################################################### print("Finding Nearest Intial Pairs") #Computing nearest centroids on GPUs centroids = [np.mean(i,axis=0) for i in init_features] X = centroids from gpu_functions import gpu_nearest_init_centroids gs = np.zeros((len(init_features))) nearest_init = np.zeros((len(init_features)), dtype = 'int') n = int(len(centroids) / len(gpus)) mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )] if len(gpus) > 1: if len(mutilple_centroids) > len(gpus): mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)]) del mutilple_centroids[len(gpus)] ind = [] step = 0 steps = [] for i, j in enumerate(mutilple_centroids[0:len(gpus)]): ind.append(range(step, len(j) + step)) steps.append(step) step += len(j) threads = [] for p, gpu in enumerate(gpus): threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init])) thread(threads) del mutilple_centroids ########################################################################################################## #Nearest initial pairs combo nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)] ########################################################################################################## gxs = [] print("Computing Gaps") # Computing gaps on GPUs from gpu_functions import gpu_distance for pair1, pair2 in nearest_init_combo: round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair2[0], pair2[1]]])
whole_distances = []
        split = split_double(args, mutilple_features[p])
        n = int(len(mutilple_features[p]) / split)
        chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )]
        step_i = 0
        threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),]))
conditional_block
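gpu_pairwise_distance sizes its launch grid with a ceiling division so that every cell of the output matrix gets a thread. A language-neutral sketch of that arithmetic, in Rust to match the other snippets; the (16, 16) block shape mirrors the threadsperblock used above and the matrix sizes are made up.

// Number of blocks needed to cover a rows x cols output with the given
// threads-per-block shape (ceiling division on each axis).
fn blocks_per_grid(rows: usize, cols: usize, tpb: (usize, usize)) -> (usize, usize) {
    ((rows + tpb.0 - 1) / tpb.0, (cols + tpb.1 - 1) / tpb.1)
}

fn main() {
    // e.g. a 1000 x 1000 chunk pair with 16 x 16 blocks needs 63 x 63 blocks
    assert_eq!(blocks_per_grid(1000, 1000, (16, 16)), (63, 63));
}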
verifier.rs
if bytes.len() % 4 != 1 && bytes.len() > 0 { let mut rv = vec![]; let mut pos = 0; while pos + 4 <= bytes.len() { let s = maybe!(triplet(&bytes[pos..pos + 4])); rv.extend_from_slice(&s); pos += 4; } if bytes.len() - pos == 2 { let a = maybe!(delut(bytes[pos])); let b = maybe!(delut(bytes[pos + 1])); rv.push(a << 2 | b >> 4); } else if bytes.len() - pos == 3 { let a = maybe!(delut(bytes[pos])); let b = maybe!(delut(bytes[pos + 1])); let c = maybe!(delut(bytes[pos + 2])); rv.push(a << 2 | b >> 4); rv.push(b << 4 | c >> 2); } Some(rv) } else { None } } struct Parser<'a> { enc: &'a [u8], pos: usize, } impl<'a> fmt::Debug for Parser<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", String::from_utf8_lossy(&self.enc[..self.pos]))?; write!(f, "<-- {} -->", self.pos)?; write!(f, "{:?}", String::from_utf8_lossy(&self.enc[self.pos..]))?; Ok(()) } } type Parsed<T> = Result<T, usize>; impl<'a> Parser<'a> { fn expect(&mut self, exp: &[u8]) -> Parsed<()> { assert!(self.pos < self.enc.len()); if self.enc.len() - self.pos < exp.len() || &self.enc[self.pos..self.pos + exp.len()] != exp { self.err() } else { self.pos += exp.len(); Ok(()) } } fn read_until(&mut self, stopchar: u8) -> &'a [u8] { let start = self.pos; let stop = |c: &u8| *c == stopchar; self.pos = match self.enc[self.pos..].iter().position(stop) { None => self.enc.len() - 1, Some(end) => self.pos + end, }; &self.enc[start..self.pos] } fn read_u32(&mut self) -> Parsed<u32> { let is_digit = |c: u8| 48 <= c && c <= 57; let mut end = self.pos; while end < self.enc.len() && is_digit(self.enc[end]) { end += 1; } match str::from_utf8(&self.enc[self.pos..end]) { Err(_) => self.err(), Ok(s) => match s.parse() { Err(_) => self.err(), Ok(n) => { self.pos = end; Ok(n) } }, } } fn read_version(&mut self) -> Parsed<Version> { self.read_u32().and_then(|vers| match vers { 0x10 => Ok(Version::_0x10), 0x13 => Ok(Version::_0x13), _ => self.err(), }) } fn decode64_till_one_of(&mut self, char_set: &[u8]) -> Parsed<Vec<u8>> { let end = self.enc[self.pos..] .iter() .position(|c| char_set.contains(c)) .map(|sub_pos| self.pos + sub_pos) .unwrap_or_else(|| self.enc.len()); match debase64_no_pad(&self.enc[self.pos..end]) { None => self.err(), Some(rv) => { self.pos = end; Ok(rv) } } } fn decode64_till(&mut self, stopchar: Option<u8>) -> Parsed<Vec<u8>> { let end = match stopchar { None => self.enc.len(), Some(c) => { self.enc[self.pos..] .iter() .take_while(|k| **k != c) .fold(0, |c, _| c + 1) + self.pos } }; match debase64_no_pad(&self.enc[self.pos..end]) { None => self.err(), Some(rv) => { self.pos = end; Ok(rv) } } } fn err<T>(&self) -> Parsed<T> { Err(self.pos) } } #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum DecodeError { /// Byte position of first parse error ParseError(usize), /// Invalid Argon2 parameters given in encoding InvalidParams(ParamErr), } impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::DecodeError::*; match *self { ParseError(pos) => write!(f, "Parse error at position {}", pos), InvalidParams(ref perr) => { write!(f, "Invalid hash parameters given by encoded: {}", perr) } } } } impl Error for DecodeError { fn description(&self) -> &str { match *self { DecodeError::ParseError(_) => "Hash string parse error.", DecodeError::InvalidParams(ref perr) => perr.description(), } } } /// Represents a single Argon2 hashing session. A hash session comprises of the /// hash algorithm parameters, salt, key, and data used to hash a given input. 
#[derive(Debug, Eq, PartialEq)] pub struct Encoded { params: Argon2, hash: Vec<u8>, salt: Vec<u8>, key: Vec<u8>, data: Vec<u8>, } type Packed = ( Variant, Version, u32, u32, u32, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, ); impl Encoded { fn parse(encoded: &[u8]) -> Result<Packed, usize> { let mut p = Parser { enc: encoded, pos: 0, }; p.expect(b"$argon2")?; let variant = match p.read_until('$' as u8) { b"d" => Variant::Argon2d, b"i" => Variant::Argon2i, b"id" => Variant::Argon2id, x => return Err(p.pos - x.len()), }; p.expect(b"$")?; let vers = match p.expect(b"v=") { // Match the c reference impl's behavior, which defaults to a v0x10 // hash encoding since the `v=` field was only introduced with // v0x13. Err(_) => Version::_0x10, Ok(()) => { let vers = p.read_version()?; p.expect(b",")?; vers } }; p.expect(b"m=")?; let kib = p.read_u32()?; p.expect(b",t=")?; let passes = p.read_u32()?; p.expect(b",p=")?; let lanes = p.read_u32()?; let key = match p.expect(b",keyid=") { Err(_) => vec![], Ok(()) => p.decode64_till_one_of(b",$")?, }; let data = match p.expect(b",data=") { Ok(()) => p.decode64_till(Some(b'$'))?, Err(_) => vec![], }; p.expect(b"$")?; let salt = p.decode64_till(Some(b'$'))?; p.expect(b"$")?; let hash = p.decode64_till(None)?; Ok((variant, vers, kib, passes, lanes, key, data, salt, hash)) } /// Reconstruct a previous hash session from serialized bytes. pub fn from_u8(encoded: &[u8]) -> Result<Self, DecodeError> { match Self::parse(encoded) { Err(pos) => Err(DecodeError::ParseError(pos)), Ok((v, vers, kib, passes, lanes, key, data, salt, hash)) => { match Argon2::with_version(passes, lanes, kib, v, vers) { Err(e) => Err(DecodeError::InvalidParams(e)), Ok(a2) => Ok(Encoded { params: a2, hash: hash, salt: salt, key: key, data: data, }), } } } } /// Serialize this hashing session into raw bytes that can later be /// recovered by `Encoded::from_u8`. pub fn to_u8(&self) -> Vec<u8> { let vcode = |v| match v { Variant::Argon2i => "i", Variant::Argon2d => "d", Variant::Argon2id => "id", }; let b64 = |x
rv } fn debase64_no_pad(bytes: &[u8]) -> Option<Vec<u8>> {
random_line_split
verifier.rs
} Some(rv) } else { None } } struct Parser<'a> { enc: &'a [u8], pos: usize, } impl<'a> fmt::Debug for Parser<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", String::from_utf8_lossy(&self.enc[..self.pos]))?; write!(f, "<-- {} -->", self.pos)?; write!(f, "{:?}", String::from_utf8_lossy(&self.enc[self.pos..]))?; Ok(()) } } type Parsed<T> = Result<T, usize>; impl<'a> Parser<'a> { fn expect(&mut self, exp: &[u8]) -> Parsed<()> { assert!(self.pos < self.enc.len()); if self.enc.len() - self.pos < exp.len() || &self.enc[self.pos..self.pos + exp.len()] != exp { self.err() } else { self.pos += exp.len(); Ok(()) } } fn read_until(&mut self, stopchar: u8) -> &'a [u8] { let start = self.pos; let stop = |c: &u8| *c == stopchar; self.pos = match self.enc[self.pos..].iter().position(stop) { None => self.enc.len() - 1, Some(end) => self.pos + end, }; &self.enc[start..self.pos] } fn read_u32(&mut self) -> Parsed<u32> { let is_digit = |c: u8| 48 <= c && c <= 57; let mut end = self.pos; while end < self.enc.len() && is_digit(self.enc[end]) { end += 1; } match str::from_utf8(&self.enc[self.pos..end]) { Err(_) => self.err(), Ok(s) => match s.parse() { Err(_) => self.err(), Ok(n) => { self.pos = end; Ok(n) } }, } } fn read_version(&mut self) -> Parsed<Version> { self.read_u32().and_then(|vers| match vers { 0x10 => Ok(Version::_0x10), 0x13 => Ok(Version::_0x13), _ => self.err(), }) } fn decode64_till_one_of(&mut self, char_set: &[u8]) -> Parsed<Vec<u8>> { let end = self.enc[self.pos..] .iter() .position(|c| char_set.contains(c)) .map(|sub_pos| self.pos + sub_pos) .unwrap_or_else(|| self.enc.len()); match debase64_no_pad(&self.enc[self.pos..end]) { None => self.err(), Some(rv) => { self.pos = end; Ok(rv) } } } fn decode64_till(&mut self, stopchar: Option<u8>) -> Parsed<Vec<u8>> { let end = match stopchar { None => self.enc.len(), Some(c) => { self.enc[self.pos..] .iter() .take_while(|k| **k != c) .fold(0, |c, _| c + 1) + self.pos } }; match debase64_no_pad(&self.enc[self.pos..end]) { None => self.err(), Some(rv) => { self.pos = end; Ok(rv) } } } fn err<T>(&self) -> Parsed<T> { Err(self.pos) } } #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum DecodeError { /// Byte position of first parse error ParseError(usize), /// Invalid Argon2 parameters given in encoding InvalidParams(ParamErr), } impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::DecodeError::*; match *self { ParseError(pos) => write!(f, "Parse error at position {}", pos), InvalidParams(ref perr) => { write!(f, "Invalid hash parameters given by encoded: {}", perr) } } } } impl Error for DecodeError { fn description(&self) -> &str { match *self { DecodeError::ParseError(_) => "Hash string parse error.", DecodeError::InvalidParams(ref perr) => perr.description(), } } } /// Represents a single Argon2 hashing session. A hash session comprises of the /// hash algorithm parameters, salt, key, and data used to hash a given input. 
#[derive(Debug, Eq, PartialEq)] pub struct Encoded { params: Argon2, hash: Vec<u8>, salt: Vec<u8>, key: Vec<u8>, data: Vec<u8>, } type Packed = ( Variant, Version, u32, u32, u32, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, ); impl Encoded { fn parse(encoded: &[u8]) -> Result<Packed, usize> { let mut p = Parser { enc: encoded, pos: 0, }; p.expect(b"$argon2")?; let variant = match p.read_until('$' as u8) { b"d" => Variant::Argon2d, b"i" => Variant::Argon2i, b"id" => Variant::Argon2id, x => return Err(p.pos - x.len()), }; p.expect(b"$")?; let vers = match p.expect(b"v=") { // Match the c reference impl's behavior, which defaults to a v0x10 // hash encoding since the `v=` field was only introduced with // v0x13. Err(_) => Version::_0x10, Ok(()) => { let vers = p.read_version()?; p.expect(b",")?; vers } }; p.expect(b"m=")?; let kib = p.read_u32()?; p.expect(b",t=")?; let passes = p.read_u32()?; p.expect(b",p=")?; let lanes = p.read_u32()?; let key = match p.expect(b",keyid=") { Err(_) => vec![], Ok(()) => p.decode64_till_one_of(b",$")?, }; let data = match p.expect(b",data=") { Ok(()) => p.decode64_till(Some(b'$'))?, Err(_) => vec![], }; p.expect(b"$")?; let salt = p.decode64_till(Some(b'$'))?; p.expect(b"$")?; let hash = p.decode64_till(None)?; Ok((variant, vers, kib, passes, lanes, key, data, salt, hash)) } /// Reconstruct a previous hash session from serialized bytes. pub fn from_u8(encoded: &[u8]) -> Result<Self, DecodeError> { match Self::parse(encoded) { Err(pos) => Err(DecodeError::ParseError(pos)), Ok((v, vers, kib, passes, lanes, key, data, salt, hash)) => { match Argon2::with_version(passes, lanes, kib, v, vers) { Err(e) => Err(DecodeError::InvalidParams(e)), Ok(a2) => Ok(Encoded { params: a2, hash: hash, salt: salt, key: key, data: data, }), } } } } /// Serialize this hashing session into raw bytes that can later be /// recovered by `Encoded::from_u8`. pub fn to_u8(&self) -> Vec<u8> { let vcode = |v| match v { Variant::Argon2i => "i", Variant::Argon2d => "d", Variant::Argon2id => "id", }; let b64 = |x| String::from_utf8(base64_no_pad(x)).unwrap(); let k_ = match &b
{
    if bytes.len() % 4 != 1 && bytes.len() > 0 {
        let mut rv = vec![];
        let mut pos = 0;
        while pos + 4 <= bytes.len() {
            let s = maybe!(triplet(&bytes[pos..pos + 4]));
            rv.extend_from_slice(&s);
            pos += 4;
        }
        if bytes.len() - pos == 2 {
            let a = maybe!(delut(bytes[pos]));
            let b = maybe!(delut(bytes[pos + 1]));
            rv.push(a << 2 | b >> 4);
        } else if bytes.len() - pos == 3 {
            let a = maybe!(delut(bytes[pos]));
            let b = maybe!(delut(bytes[pos + 1]));
            let c = maybe!(delut(bytes[pos + 2]));
            rv.push(a << 2 | b >> 4);
            rv.push(b << 4 | c >> 2);
identifier_body
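debase64_no_pad above rejects inputs whose length is 1 mod 4 and maps a trailing group of 2 or 3 characters to 1 or 2 extra bytes. A small sketch of just that length rule; the function name is invented, and the empty-input check mirrors the original's bytes.len() > 0 condition.

// Decoded byte count for an unpadded base64 string, or None if the length
// can never occur (empty input, or a remainder of 1).
fn decoded_len(encoded_len: usize) -> Option<usize> {
    if encoded_len == 0 {
        return None; // the original also treats an empty slice as invalid
    }
    match encoded_len % 4 {
        1 => None,
        0 => Some(encoded_len / 4 * 3),
        r => Some(encoded_len / 4 * 3 + r - 1), // r == 2 -> +1 byte, r == 3 -> +2
    }
}

fn main() {
    assert_eq!(decoded_len(4), Some(3));
    assert_eq!(decoded_len(6), Some(4));
    assert_eq!(decoded_len(5), None);
    assert_eq!(decoded_len(0), None);
}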
verifier.rs
2 | b >> 4); } else if bytes.len() - pos == 3 { let a = maybe!(delut(bytes[pos])); let b = maybe!(delut(bytes[pos + 1])); let c = maybe!(delut(bytes[pos + 2])); rv.push(a << 2 | b >> 4); rv.push(b << 4 | c >> 2); } Some(rv) } else { None } } struct Parser<'a> { enc: &'a [u8], pos: usize, } impl<'a> fmt::Debug for Parser<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", String::from_utf8_lossy(&self.enc[..self.pos]))?; write!(f, "<-- {} -->", self.pos)?; write!(f, "{:?}", String::from_utf8_lossy(&self.enc[self.pos..]))?; Ok(()) } } type Parsed<T> = Result<T, usize>; impl<'a> Parser<'a> { fn expect(&mut self, exp: &[u8]) -> Parsed<()> { assert!(self.pos < self.enc.len()); if self.enc.len() - self.pos < exp.len() || &self.enc[self.pos..self.pos + exp.len()] != exp { self.err() } else { self.pos += exp.len(); Ok(()) } } fn read_until(&mut self, stopchar: u8) -> &'a [u8] { let start = self.pos; let stop = |c: &u8| *c == stopchar; self.pos = match self.enc[self.pos..].iter().position(stop) { None => self.enc.len() - 1, Some(end) => self.pos + end, }; &self.enc[start..self.pos] } fn read_u32(&mut self) -> Parsed<u32> { let is_digit = |c: u8| 48 <= c && c <= 57; let mut end = self.pos; while end < self.enc.len() && is_digit(self.enc[end]) { end += 1; } match str::from_utf8(&self.enc[self.pos..end]) { Err(_) => self.err(), Ok(s) => match s.parse() { Err(_) => self.err(), Ok(n) => { self.pos = end; Ok(n) } }, } } fn read_version(&mut self) -> Parsed<Version> { self.read_u32().and_then(|vers| match vers { 0x10 => Ok(Version::_0x10), 0x13 => Ok(Version::_0x13), _ => self.err(), }) } fn decode64_till_one_of(&mut self, char_set: &[u8]) -> Parsed<Vec<u8>> { let end = self.enc[self.pos..] .iter() .position(|c| char_set.contains(c)) .map(|sub_pos| self.pos + sub_pos) .unwrap_or_else(|| self.enc.len()); match debase64_no_pad(&self.enc[self.pos..end]) { None => self.err(), Some(rv) => { self.pos = end; Ok(rv) } } } fn decode64_till(&mut self, stopchar: Option<u8>) -> Parsed<Vec<u8>> { let end = match stopchar { None => self.enc.len(), Some(c) => { self.enc[self.pos..] .iter() .take_while(|k| **k != c) .fold(0, |c, _| c + 1) + self.pos } }; match debase64_no_pad(&self.enc[self.pos..end]) { None => self.err(), Some(rv) => { self.pos = end; Ok(rv) } } } fn err<T>(&self) -> Parsed<T> { Err(self.pos) } } #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum DecodeError { /// Byte position of first parse error ParseError(usize), /// Invalid Argon2 parameters given in encoding InvalidParams(ParamErr), } impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::DecodeError::*; match *self { ParseError(pos) => write!(f, "Parse error at position {}", pos), InvalidParams(ref perr) => { write!(f, "Invalid hash parameters given by encoded: {}", perr) } } } } impl Error for DecodeError { fn description(&self) -> &str { match *self { DecodeError::ParseError(_) => "Hash string parse error.", DecodeError::InvalidParams(ref perr) => perr.description(), } } } /// Represents a single Argon2 hashing session. A hash session comprises of the /// hash algorithm parameters, salt, key, and data used to hash a given input. 
#[derive(Debug, Eq, PartialEq)] pub struct Encoded { params: Argon2, hash: Vec<u8>, salt: Vec<u8>, key: Vec<u8>, data: Vec<u8>, } type Packed = ( Variant, Version, u32, u32, u32, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, ); impl Encoded { fn parse(encoded: &[u8]) -> Result<Packed, usize> { let mut p = Parser { enc: encoded, pos: 0, }; p.expect(b"$argon2")?; let variant = match p.read_until('$' as u8) { b"d" => Variant::Argon2d, b"i" => Variant::Argon2i, b"id" => Variant::Argon2id, x => return Err(p.pos - x.len()), }; p.expect(b"$")?; let vers = match p.expect(b"v=") { // Match the c reference impl's behavior, which defaults to a v0x10 // hash encoding since the `v=` field was only introduced with // v0x13. Err(_) => Version::_0x10, Ok(()) => { let vers = p.read_version()?; p.expect(b",")?; vers } }; p.expect(b"m=")?; let kib = p.read_u32()?; p.expect(b",t=")?; let passes = p.read_u32()?; p.expect(b",p=")?; let lanes = p.read_u32()?; let key = match p.expect(b",keyid=") { Err(_) => vec![], Ok(()) => p.decode64_till_one_of(b",$")?, }; let data = match p.expect(b",data=") { Ok(()) => p.decode64_till(Some(b'$'))?, Err(_) => vec![], }; p.expect(b"$")?; let salt = p.decode64_till(Some(b'$'))?; p.expect(b"$")?; let hash = p.decode64_till(None)?; Ok((variant, vers, kib, passes, lanes, key, data, salt, hash)) } /// Reconstruct a previous hash session from serialized bytes. pub fn from_u8(encoded: &[u8]) -> Result<Self, DecodeError> { match Self::parse(encoded) { Err(pos) => Err(DecodeError::ParseError(pos)), Ok((v, vers, kib, passes, lanes, key, data, salt, hash)) => { match Argon2::with_version(passes, lanes, kib, v, vers) { Err(e) => Err(DecodeError::InvalidParams(e)), Ok(a2) => Ok(Encoded { params: a2, hash: hash, salt: salt, key: key, data: data, }), } } } } /// Serialize this hashing session into raw bytes that can later be /// recovered by `Encoded::from_u8`. pub fn
(&self) -> Vec<u8> {
        let vcode = |v| match v {
            Variant::Argon2i => "i",
            Variant::Argon2d => "d",
            Variant::Argon2id => "id",
        };
        let b64 = |x| String::from_utf8(base64_no_pad(x)).unwrap();
        let k_ = match &b64(&self.key[..]) {
            bytes if bytes.len() > 0 => format!(",keyid={}", bytes),
            _ => String::new(),
        };
        let x_ = match &b64(&self.data[..]) {
            bytes if bytes.len() > 0 => format!(",data={}", bytes),
            _ => String::new(),
        };
        let (var, m, t, p, vers) = self.params();
        format!(
            "$argon2{}$v={},m={},t={},p={}{}{}${}${}",
            v
to_u8
identifier_name
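For reference, the string produced by to_u8 (and accepted by parse) has the shape sketched below. The parameter values and the base64 salt/hash are invented for illustration, and the optional keyid=/data= fields are omitted because key and data are empty in this example.

fn main() {
    // field order taken from the format string in to_u8
    let (variant, version, m_kib, passes, lanes) = ("i", 0x13, 4096, 3, 1);
    let (salt_b64, hash_b64) = ("c29tZXNhbHQ", "c29tZWhhc2g"); // "somesalt" / "somehash"
    let encoded = format!(
        "$argon2{}$v={},m={},t={},p={}${}${}",
        variant, version, m_kib, passes, lanes, salt_b64, hash_b64
    );
    assert_eq!(
        encoded,
        "$argon2i$v=19,m=4096,t=3,p=1$c29tZXNhbHQ$c29tZWhhc2g"
    );
}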
disk.rs
to convert a standard disk image to an async disk image. This conversion and the /// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is /// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned /// to the main device thread if the block device is destroyed or reset. fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>; } impl ToAsyncDisk for File { fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> { Ok(Box::new(SingleFileDisk::new(*self, ex)?)) } } /// The variants of image files on the host that can be used as virtual disks. #[derive(Debug, PartialEq, Eq)] pub enum ImageType { Raw, Qcow2, CompositeDisk, AndroidSparse, } fn log_host_fs_type(file: &File) -> Result<()> { let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?; info!("Disk image file is hosted on file system type {:x}", fstype); Ok(()) } /// Detect the type of an image file by checking for a valid header of the supported formats. pub fn detect_image_type(file: &File) -> Result<ImageType> { let mut f = file; let disk_size = f.get_len().map_err(Error::SeekingFile)?; let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?; f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?; info!("disk size {}, ", disk_size); log_host_fs_type(f)?; // Try to read the disk in a nicely-aligned block size unless the whole file is smaller. const MAGIC_BLOCK_SIZE: usize = 4096; #[repr(align(4096))] struct BlockAlignedBuffer { data: [u8; MAGIC_BLOCK_SIZE], } let mut magic = BlockAlignedBuffer { data: [0u8; MAGIC_BLOCK_SIZE], }; let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 { MAGIC_BLOCK_SIZE } else { // This cast is safe since we know disk_size is less than MAGIC_BLOCK_SIZE (4096) and // therefore is representable in usize. disk_size as usize }; f.read_exact(&mut magic.data[0..magic_read_len]) .map_err(Error::ReadingHeader)?; f.seek(SeekFrom::Start(orig_seek)) .map_err(Error::SeekingFile)?; #[cfg(feature = "composite-disk")] if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) { if cdisk_magic == CDISK_MAGIC.as_bytes() { return Ok(ImageType::CompositeDisk); } } #[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features. if let Some(magic4) = magic.data.get(0..4) { #[cfg(feature = "qcow")] if magic4 == QCOW_MAGIC.to_be_bytes() { return Ok(ImageType::Qcow2); } #[cfg(feature = "android-sparse")] if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() { return Ok(ImageType::AndroidSparse); } } Ok(ImageType::Raw) } /// Inspect the image file type and create an appropriate disk file to match it. pub fn create_disk_file( raw_image: File, is_sparse_file: bool, // max_nesting_depth is only used if the composite-disk or qcow features are enabled. #[allow(unused_variables)] mut max_nesting_depth: u32, // image_path is only used if the composite-disk feature is enabled. #[allow(unused_variables)] image_path: &Path, ) -> Result<Box<dyn DiskFile>> { if max_nesting_depth == 0 { return Err(Error::MaxNestingDepthExceeded); } #[allow(unused_assignments)] { max_nesting_depth -= 1; } let image_type = detect_image_type(&raw_image)?; Ok(match image_type { ImageType::Raw => { sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?; Box::new(raw_image) as Box<dyn DiskFile> } #[cfg(feature = "qcow")] ImageType::Qcow2 => { Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?) 
as Box<dyn DiskFile> } #[cfg(feature = "composite-disk")] ImageType::CompositeDisk => { // Valid composite disk header present Box::new( CompositeDiskFile::from_file( raw_image, is_sparse_file, max_nesting_depth, image_path, ) .map_err(Error::CreateCompositeDisk)?, ) as Box<dyn DiskFile> } #[cfg(feature = "android-sparse")] ImageType::AndroidSparse => { Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?) as Box<dyn DiskFile> } #[allow(unreachable_patterns)] _ => return Err(Error::UnknownType), }) } /// An asynchronously accessible disk. #[async_trait(?Send)] pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate { /// Returns the inner file consuming self. fn into_inner(self: Box<Self>) -> Box<dyn DiskFile>; /// Asynchronously fsyncs any completed operations to the disk. async fn fsync(&self) -> Result<()>; /// Reads from the file at 'file_offset' in to memory `mem` at `mem_offsets`. /// `mem_offsets` is similar to an iovec except relative to the start of `mem`. async fn read_to_mem<'a>( &'a self, file_offset: u64, mem: Arc<dyn BackingMemory + Send + Sync>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize>; /// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`. async fn write_from_mem<'a>( &'a self, file_offset: u64, mem: Arc<dyn BackingMemory + Send + Sync>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize>; /// Replaces a range of bytes with a hole. async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>; /// Writes up to `length` bytes of zeroes to the stream, returning how many bytes were written. async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>; } /// A disk backed by a single file that implements `AsyncDisk` for access. pub struct SingleFileDisk { inner: Box<dyn IoSourceExt<File>>, } impl SingleFileDisk { pub fn new(disk: File, ex: &Executor) -> Result<Self> { ex.async_from(disk) .map_err(Error::CreateSingleFileDisk) .map(|inner| SingleFileDisk { inner }) } } impl DiskGetLen for SingleFileDisk { fn get_len(&self) -> io::Result<u64> { self.inner.as_source().get_len() } } impl FileSetLen for SingleFileDisk { fn set_len(&self, len: u64) -> io::Result<()> { self.inner.as_source().set_len(len) } } impl FileAllocate for SingleFileDisk { fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> { self.inner.as_source_mut().allocate(offset, len) } } #[async_trait(?Send)] impl AsyncDisk for SingleFileDisk { fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> { Box::new(self.inner.into_source()) } async fn fsync(&self) -> Result<()> { self.inner.fsync().await.map_err(Error::Fsync) } async fn read_to_mem<'a>( &'a self, file_offset: u64, mem: Arc<dyn BackingMemory + Send + Sync>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize> { self.inner .read_to_mem(Some(file_offset), mem, mem_offsets) .await .map_err(Error::ReadToMem) } async fn write_from_mem<'a>( &'a self, file_offset: u64, mem: Arc<dyn BackingMemory + Send + Sync>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize> { self.inner .write_from_mem(Some(file_offset), mem, mem_offsets) .await .map_err(Error::WriteFromMem) } async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> { self.inner .fallocate(file_offset, length, AllocateMode::PunchHole) .await .map_err(Error::Fallocate) } async fn
write_zeroes_at
identifier_name
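The detect_image_type logic in the disk.rs record above identifies the format purely from magic bytes at the start of the file. Below is a minimal Python sketch of the same check, assuming the usual QCOW2 magic (0x514649fb, stored big-endian) and Android sparse magic (0xed26ff3a, stored little-endian); the function name and return strings are illustrative, not part of the crate.

import struct

QCOW_MAGIC = 0x514649FB           # assumed QCOW2 magic ('QFI\xfb'), big-endian on disk
SPARSE_HEADER_MAGIC = 0xED26FF3A  # assumed Android sparse magic, little-endian on disk

def detect_image_type(path):
    """Best-effort format detection from the first four bytes of the file."""
    with open(path, "rb") as f:
        magic = f.read(4)
    if magic == struct.pack(">I", QCOW_MAGIC):
        return "qcow2"
    if magic == struct.pack("<I", SPARSE_HEADER_MAGIC):
        return "android-sparse"
    return "raw"  # anything unrecognised (including short files) is treated as raw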
disk.rs
Sparse; #[cfg(feature = "android-sparse")] use android_sparse::SPARSE_HEADER_MAGIC; /// Nesting depth limit for disk formats that can open other disk files. pub const MAX_NESTING_DEPTH: u32 = 10; #[derive(ThisError, Debug)] pub enum Error { #[error("failed to create block device: {0}")] BlockDeviceNew(base::Error), #[error("requested file conversion not supported")] ConversionNotSupported, #[cfg(feature = "android-sparse")] #[error("failure in android sparse disk: {0}")] CreateAndroidSparseDisk(android_sparse::Error), #[cfg(feature = "composite-disk")] #[error("failure in composite disk: {0}")] CreateCompositeDisk(composite::Error), #[error("failure creating single file disk: {0}")] CreateSingleFileDisk(cros_async::AsyncError), #[error("failure with fallocate: {0}")] Fallocate(cros_async::AsyncError), #[error("failure with fsync: {0}")] Fsync(cros_async::AsyncError), #[error("failure with fsync: {0}")] IoFsync(io::Error), #[error("checking host fs type: {0}")] HostFsType(base::Error), #[error("maximum disk nesting depth exceeded")] MaxNestingDepthExceeded, #[error("failure to punch hole: {0}")] PunchHole(io::Error), #[cfg(feature = "qcow")] #[error("failure in qcow: {0}")] QcowError(qcow::Error), #[error("failed to read data: {0}")] ReadingData(io::Error), #[error("failed to read header: {0}")] ReadingHeader(io::Error), #[error("failed to read to memory: {0}")] ReadToMem(cros_async::AsyncError), #[error("failed to seek file: {0}")] SeekingFile(io::Error), #[error("failed to set file size: {0}")] SettingFileSize(io::Error), #[error("unknown disk type")] UnknownType, #[error("failed to write from memory: {0}")] WriteFromMem(cros_async::AsyncError), #[error("failed to write from vec: {0}")] WriteFromVec(cros_async::AsyncError), #[error("failed to write zeroes: {0}")] WriteZeroes(io::Error), #[error("failed to write data: {0}")] WritingData(io::Error), #[cfg(windows)] #[error("failed to set disk file sparse: {0}")] SetSparseFailure(io::Error), } pub type Result<T> = std::result::Result<T, Error>; /// A trait for getting the length of a disk image or raw block device. pub trait DiskGetLen { /// Get the current length of the disk in bytes. fn get_len(&self) -> io::Result<u64>; } impl DiskGetLen for File { fn get_len(&self) -> io::Result<u64> { let mut s = self; let orig_seek = s.seek(SeekFrom::Current(0))?; let end = s.seek(SeekFrom::End(0))? as u64; s.seek(SeekFrom::Start(orig_seek))?; Ok(end) } } /// The prerequisites necessary to support a block device. #[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds. pub trait DiskFile: FileSetLen + DiskGetLen + FileSync + FileReadWriteAtVolatile + PunchHole + WriteZeroesAt + FileAllocate + ToAsyncDisk + Send + AsRawDescriptors + Debug { } impl< D: FileSetLen + DiskGetLen + FileSync + PunchHole + FileReadWriteAtVolatile + WriteZeroesAt + FileAllocate + ToAsyncDisk + Send + AsRawDescriptors + Debug, > DiskFile for D { } /// A `DiskFile` that can be converted for asychronous access. pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send { /// Convert a boxed self in to a box-wrapped implementaiton of AsyncDisk. /// Used to convert a standard disk image to an async disk image. This conversion and the /// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is /// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned /// to the main device thread if the block device is destroyed or reset. 
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>; } impl ToAsyncDisk for File { fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> { Ok(Box::new(SingleFileDisk::new(*self, ex)?)) } } /// The variants of image files on the host that can be used as virtual disks. #[derive(Debug, PartialEq, Eq)] pub enum ImageType { Raw, Qcow2, CompositeDisk, AndroidSparse, } fn log_host_fs_type(file: &File) -> Result<()> { let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?; info!("Disk image file is hosted on file system type {:x}", fstype); Ok(()) } /// Detect the type of an image file by checking for a valid header of the supported formats. pub fn detect_image_type(file: &File) -> Result<ImageType> { let mut f = file; let disk_size = f.get_len().map_err(Error::SeekingFile)?; let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?; f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?; info!("disk size {}, ", disk_size); log_host_fs_type(f)?; // Try to read the disk in a nicely-aligned block size unless the whole file is smaller. const MAGIC_BLOCK_SIZE: usize = 4096; #[repr(align(4096))] struct BlockAlignedBuffer { data: [u8; MAGIC_BLOCK_SIZE], } let mut magic = BlockAlignedBuffer { data: [0u8; MAGIC_BLOCK_SIZE], }; let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 { MAGIC_BLOCK_SIZE } else { // This cast is safe since we know disk_size is less than MAGIC_BLOCK_SIZE (4096) and // therefore is representable in usize. disk_size as usize }; f.read_exact(&mut magic.data[0..magic_read_len]) .map_err(Error::ReadingHeader)?; f.seek(SeekFrom::Start(orig_seek)) .map_err(Error::SeekingFile)?; #[cfg(feature = "composite-disk")] if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) { if cdisk_magic == CDISK_MAGIC.as_bytes() { return Ok(ImageType::CompositeDisk); } } #[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features. if let Some(magic4) = magic.data.get(0..4) { #[cfg(feature = "qcow")] if magic4 == QCOW_MAGIC.to_be_bytes() { return Ok(ImageType::Qcow2); } #[cfg(feature = "android-sparse")] if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() { return Ok(ImageType::AndroidSparse); } } Ok(ImageType::Raw) }
// max_nesting_depth is only used if the composite-disk or qcow features are enabled. #[allow(unused_variables)] mut max_nesting_depth: u32, // image_path is only used if the composite-disk feature is enabled. #[allow(unused_variables)] image_path: &Path, ) -> Result<Box<dyn DiskFile>> { if max_nesting_depth == 0 { return Err(Error::MaxNestingDepthExceeded); } #[allow(unused_assignments)] { max_nesting_depth -= 1; } let image_type = detect_image_type(&raw_image)?; Ok(match image_type { ImageType::Raw => { sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?; Box::new(raw_image) as Box<dyn DiskFile> } #[cfg(feature = "qcow")] ImageType::Qcow2 => { Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?) as Box<dyn DiskFile> } #[cfg(feature = "composite-disk")] ImageType::CompositeDisk => { // Valid composite disk header present Box::new( CompositeDiskFile::from_file( raw_image, is_sparse_file, max_nesting_depth, image_path, ) .map_err(Error::CreateCompositeDisk)?, ) as Box<dyn DiskFile> } #[cfg(feature
/// Inspect the image file type and create an appropriate disk file to match it. pub fn create_disk_file( raw_image: File, is_sparse_file: bool,
random_line_split
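create_disk_file guards against images that reference other images without bound (composite or qcow2 backing chains) by threading a nesting-depth budget through each recursive open and failing once it reaches zero. A small Python sketch of the same bookkeeping, with a plain dict standing in for the header parsing the real code does; the names here are illustrative only.

MAX_NESTING_DEPTH = 10  # mirrors the constant used by the Rust code above

class MaxNestingDepthExceeded(Exception):
    pass

def resolve_backing_chain(image, backing_of, max_nesting_depth=MAX_NESTING_DEPTH):
    """Walk a chain of backing images, enforcing a fixed depth budget.

    `backing_of` is a plain dict {image: backing image or None}; it stands in
    for the qcow2/composite header parsing done by the real code.
    """
    chain = []
    while image is not None:
        if max_nesting_depth == 0:
            raise MaxNestingDepthExceeded(image)
        max_nesting_depth -= 1          # each level of nesting consumes one unit of the budget
        chain.append(image)
        image = backing_of.get(image)
    return chain

# usage: a two-level chain resolves fine; a self-referencing image raises
print(resolve_backing_chain("overlay.qcow2", {"overlay.qcow2": "base.raw", "base.raw": None}))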
disk.rs
; #[cfg(feature = "android-sparse")] use android_sparse::SPARSE_HEADER_MAGIC; /// Nesting depth limit for disk formats that can open other disk files. pub const MAX_NESTING_DEPTH: u32 = 10; #[derive(ThisError, Debug)] pub enum Error { #[error("failed to create block device: {0}")] BlockDeviceNew(base::Error), #[error("requested file conversion not supported")] ConversionNotSupported, #[cfg(feature = "android-sparse")] #[error("failure in android sparse disk: {0}")] CreateAndroidSparseDisk(android_sparse::Error), #[cfg(feature = "composite-disk")] #[error("failure in composite disk: {0}")] CreateCompositeDisk(composite::Error), #[error("failure creating single file disk: {0}")] CreateSingleFileDisk(cros_async::AsyncError), #[error("failure with fallocate: {0}")] Fallocate(cros_async::AsyncError), #[error("failure with fsync: {0}")] Fsync(cros_async::AsyncError), #[error("failure with fsync: {0}")] IoFsync(io::Error), #[error("checking host fs type: {0}")] HostFsType(base::Error), #[error("maximum disk nesting depth exceeded")] MaxNestingDepthExceeded, #[error("failure to punch hole: {0}")] PunchHole(io::Error), #[cfg(feature = "qcow")] #[error("failure in qcow: {0}")] QcowError(qcow::Error), #[error("failed to read data: {0}")] ReadingData(io::Error), #[error("failed to read header: {0}")] ReadingHeader(io::Error), #[error("failed to read to memory: {0}")] ReadToMem(cros_async::AsyncError), #[error("failed to seek file: {0}")] SeekingFile(io::Error), #[error("failed to set file size: {0}")] SettingFileSize(io::Error), #[error("unknown disk type")] UnknownType, #[error("failed to write from memory: {0}")] WriteFromMem(cros_async::AsyncError), #[error("failed to write from vec: {0}")] WriteFromVec(cros_async::AsyncError), #[error("failed to write zeroes: {0}")] WriteZeroes(io::Error), #[error("failed to write data: {0}")] WritingData(io::Error), #[cfg(windows)] #[error("failed to set disk file sparse: {0}")] SetSparseFailure(io::Error), } pub type Result<T> = std::result::Result<T, Error>; /// A trait for getting the length of a disk image or raw block device. pub trait DiskGetLen { /// Get the current length of the disk in bytes. fn get_len(&self) -> io::Result<u64>; } impl DiskGetLen for File { fn get_len(&self) -> io::Result<u64> { let mut s = self; let orig_seek = s.seek(SeekFrom::Current(0))?; let end = s.seek(SeekFrom::End(0))? as u64; s.seek(SeekFrom::Start(orig_seek))?; Ok(end) } } /// The prerequisites necessary to support a block device. #[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds. pub trait DiskFile: FileSetLen + DiskGetLen + FileSync + FileReadWriteAtVolatile + PunchHole + WriteZeroesAt + FileAllocate + ToAsyncDisk + Send + AsRawDescriptors + Debug { } impl< D: FileSetLen + DiskGetLen + FileSync + PunchHole + FileReadWriteAtVolatile + WriteZeroesAt + FileAllocate + ToAsyncDisk + Send + AsRawDescriptors + Debug, > DiskFile for D { } /// A `DiskFile` that can be converted for asychronous access. pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send { /// Convert a boxed self in to a box-wrapped implementaiton of AsyncDisk. /// Used to convert a standard disk image to an async disk image. This conversion and the /// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is /// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned /// to the main device thread if the block device is destroyed or reset. 
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>; } impl ToAsyncDisk for File { fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> { Ok(Box::new(SingleFileDisk::new(*self, ex)?)) } } /// The variants of image files on the host that can be used as virtual disks. #[derive(Debug, PartialEq, Eq)] pub enum ImageType { Raw, Qcow2, CompositeDisk, AndroidSparse, } fn log_host_fs_type(file: &File) -> Result<()> { let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?; info!("Disk image file is hosted on file system type {:x}", fstype); Ok(()) } /// Detect the type of an image file by checking for a valid header of the supported formats. pub fn detect_image_type(file: &File) -> Result<ImageType> { let mut f = file; let disk_size = f.get_len().map_err(Error::SeekingFile)?; let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?; f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?; info!("disk size {}, ", disk_size); log_host_fs_type(f)?; // Try to read the disk in a nicely-aligned block size unless the whole file is smaller. const MAGIC_BLOCK_SIZE: usize = 4096; #[repr(align(4096))] struct BlockAlignedBuffer { data: [u8; MAGIC_BLOCK_SIZE], } let mut magic = BlockAlignedBuffer { data: [0u8; MAGIC_BLOCK_SIZE], }; let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 { MAGIC_BLOCK_SIZE } else { // This cast is safe since we know disk_size is less than MAGIC_BLOCK_SIZE (4096) and // therefore is representable in usize. disk_size as usize }; f.read_exact(&mut magic.data[0..magic_read_len]) .map_err(Error::ReadingHeader)?; f.seek(SeekFrom::Start(orig_seek)) .map_err(Error::SeekingFile)?; #[cfg(feature = "composite-disk")] if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) { if cdisk_magic == CDISK_MAGIC.as_bytes() { return Ok(ImageType::CompositeDisk); } } #[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features. if let Some(magic4) = magic.data.get(0..4) { #[cfg(feature = "qcow")] if magic4 == QCOW_MAGIC.to_be_bytes() { return Ok(ImageType::Qcow2); } #[cfg(feature = "android-sparse")] if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() { return Ok(ImageType::AndroidSparse); } } Ok(ImageType::Raw) } /// Inspect the image file type and create an appropriate disk file to match it. pub fn create_disk_file( raw_image: File, is_sparse_file: bool, // max_nesting_depth is only used if the composite-disk or qcow features are enabled. #[allow(unused_variables)] mut max_nesting_depth: u32, // image_path is only used if the composite-disk feature is enabled. #[allow(unused_variables)] image_path: &Path, ) -> Result<Box<dyn DiskFile>> { if max_nesting_depth == 0 { return Err(Error::MaxNestingDepthExceeded); } #[allow(unused_assignments)] { max_nesting_depth -= 1; } let image_type = detect_image_type(&raw_image)?; Ok(match image_type { ImageType::Raw =>
#[cfg(feature = "qcow")] ImageType::Qcow2 => { Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?) as Box<dyn DiskFile> } #[cfg(feature = "composite-disk")] ImageType::CompositeDisk => { // Valid composite disk header present Box::new( CompositeDiskFile::from_file( raw_image, is_sparse_file, max_nesting_depth, image_path, ) .map_err(Error::CreateCompositeDisk)?, ) as Box<dyn DiskFile> } #[cfg
{ sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?; Box::new(raw_image) as Box<dyn DiskFile> }
conditional_block
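AsyncDisk::read_to_mem in the record above takes a file offset plus a list of regions expressed relative to the start of the destination memory, i.e. an iovec-like scatter read. A rough Python equivalent over a bytearray, assuming (offset, length) pairs; this is a sketch of the access pattern, not of the cros_async API.

import io

def read_to_mem(f, file_offset, mem, mem_offsets):
    """Scatter-read from file object `f` starting at `file_offset` into
    regions of `mem` given as (offset, length) pairs relative to mem's start.
    Returns the total number of bytes read."""
    total = 0
    f.seek(file_offset)
    for offset, length in mem_offsets:
        chunk = f.read(length)
        mem[offset:offset + len(chunk)] = chunk
        total += len(chunk)
        if len(chunk) < length:  # short read: the file ended early
            break
    return total

# usage: read 4 bytes from offset 2 into the middle of an 8-byte buffer
buf = bytearray(8)
read_to_mem(io.BytesIO(b"abcdefgh"), 2, buf, [(3, 4)])
print(buf)  # bytearray(b'\x00\x00\x00cdef\x00')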
gen_functionalization_type.py
. def unwrap_tensor_args(sig: DispatcherSignature) -> Tuple[str, List[Binding]]: context: List[Binding] = [] unwrapped_tensor_args: List[str] = [] for arg in sig.arguments(): if is_tensor_like(arg.argument): # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. unwrapped_name = f'{arg.name}_' unwrapped_tensor_args.append( f'auto {unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});') context.append(arg.with_name(unwrapped_name)) else: # for non-tensor inputs, we want to pass them directly into the redispatch calls. context.append(arg) unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args) return unwrap_tensor_args_str, context # converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns: # (1) a string containing all of the logic that does the conversions. # (2) a context, to be used by translate(), with all of the relevant bindings. def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]: context: List[Binding] = [] unwrapped_tensor_args: List[str] = [] for arg in sig.arguments(): if is_tensor_like(arg.argument): # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. a_ = arg.name unwrapped_name = f'{arg.name}_meta' unwrapped_tensor_args.append( f"auto {unwrapped_name} = at::native::empty_strided_meta({a_}.sizes(), {a_}.strides(), \ /*dtype=*/c10::make_optional({a_}.scalar_type()), /*layout=*/c10::make_optional({a_}.layout()), \ /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);" ) context.append(arg.with_name(unwrapped_name)) else: # for non-tensor inputs, we want to pass them directly into the redispatch calls. context.append(arg) unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args) return unwrap_tensor_args_str, context # Generates the Functionalization kernel for: # - ops that create aliases (e.g. transpose()) # - ops that are views AND mutations (e.g. transpose_()) def emit_view_functionalization_body( f: NativeFunction, functional_op: NativeFunction ) -> str: # view op case assert f.is_view_op if f.tag is Tag.inplace_view: # This op is both an inplace op AND a view op. # See Note [Functionalization Pass - Inplace View Ops] for details. # I currently have the view meta call into the out-of-place variant of the view, to avoid # having to define an extra ~20 inplace {view}_inverse_ functions. # Most view ops don't have NativeFunctionGroup's both, because we don't define out= variants for view ops. # I'm assuming that every inplace-view op has a corresponding out-of-place view op, # with the same name but the trailing underscore removed. # This is currently asserted at parse time in gen.py (see error_check_native_functions). assert f.func.kind() is SchemaKind.inplace # Requirement: Every inplace_view op needs to have a corresponding functional view op, which we paired together beforehand. 
assert functional_op is not None api_name = functional_op.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(functional_op.func) else: api_name = f.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(f.func) dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) view_redispatch_args = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)] forward_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=False) reverse_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=True) # The meta API call should use the same arguments, but convert all tensors to meta tensors first. meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig) meta_call_args = [e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)] if f.tag is Tag.inplace_view: # See Note [Functionalization Pass - Inplace View Ops] for more details return f""" at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); at::functionalization::impl::mutate_view_meta(self, view_meta); {unwrap_tensor_args_str} {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); }} // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); return self; """ else: return f""" {unwrap_tensor_args_str} {return_type} tmp_output; {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); tmp_output = at::_ops::{api_name}::redispatch({', '.join(view_redispatch_args)}); // I'm fusing the [alias removal], [mutation removal], [add views back] passes together. // Later, we'll want to turn them into separate passes (since e.g. vulkan only cares about alias removal). 
}} at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); return out; """ # Generates the Functionalization kernel for inplace ops def emit_inplace_functionalization_body( f: NativeFunction, functional_op: Optional[NativeFunction] ) -> str: # mutation case assert(modifies_arguments(f)) dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) maybe_return = '' if len(f.func.returns) == 0 else 'return ' sync_tensor_args = '\n '.join(mapMaybe( lambda arg: f'at::functionalization::impl::sync({arg.name});' if arg.type.is_tensor_like() else None, f.func.arguments.flat_all)) if functional_op is None: # We can't functionalize this inplace op, since we don't know what the corresponding functional op is. inplace_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)] warn_str = "Note: the functionalization pass encountered an operator ({}) that it could not functionalize, \ because it couldn't find an out-of-place equivalent of the operator to call. \ Instead, it's calling the inplace/view operator directly. \ If this causes problems in your program, consider upstreaming the out-of-place op to PyTorch.".format(str(f.func.name)) return f""" if (c10::impl::tls_local_dispatch_key_set().included_.has(c10::DispatchKey::Functionalize)) {{ TORCH_WARN("{warn_str}"); }} {sync_tensor_args} {unwrap_tensor_args_str} at::AutoDispatchSkipFunctionalize guard; // Redispatch as normally otherwise, since XLA has its own lowerings for special inplace ops. {maybe_return}at::_ops::{f.func.name.unambiguous_name()}::redispatch({', '.join(inplace_exprs)}); """ # call the out-of-place variant of the op functional_sig = DispatcherSignature.from_schema(functional_op.func)
mutable_input_post_processing = '\n'.join([ f""" auto {a.name}_functional = at::functionalization::impl::unsafeGetFunctionalWrapper({a.name}); {a.name}_functional->replace_(tmp_output);
functional_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)]
random_line_split
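unwrap_tensor_args in the generator above walks the dispatcher bindings, emits one C++ unwrap line per tensor-like argument, and rebinds that argument to the new `<name>_` local while passing everything else through. A toy Python version of the same codegen pattern, with a name list and a predicate standing in for the Binding/Argument types used by the real generator.

def unwrap_tensor_args(args, is_tensor_like):
    """Emit one unwrap line per tensor-like argument and return the rewritten
    argument context; non-tensor arguments pass through untouched."""
    lines, context = [], []
    for name in args:
        if is_tensor_like(name):
            unwrapped = f"{name}_"
            lines.append(
                f"auto {unwrapped} = at::functionalization::impl::from_functional_tensor({name});")
            context.append(unwrapped)
        else:
            context.append(name)
    return "\n  ".join(lines), context

# usage: only `self` and `other` get unwrapped, `alpha` is passed through
code, ctx = unwrap_tensor_args(["self", "other", "alpha"], lambda n: n != "alpha")
print(code)
print(ctx)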
gen_functionalization_type.py
List[str] = [] for arg in sig.arguments(): if is_tensor_like(arg.argument): # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. a_ = arg.name unwrapped_name = f'{arg.name}_meta' unwrapped_tensor_args.append( f"auto {unwrapped_name} = at::native::empty_strided_meta({a_}.sizes(), {a_}.strides(), \ /*dtype=*/c10::make_optional({a_}.scalar_type()), /*layout=*/c10::make_optional({a_}.layout()), \ /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);" ) context.append(arg.with_name(unwrapped_name)) else: # for non-tensor inputs, we want to pass them directly into the redispatch calls. context.append(arg) unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args) return unwrap_tensor_args_str, context # Generates the Functionalization kernel for: # - ops that create aliases (e.g. transpose()) # - ops that are views AND mutations (e.g. transpose_()) def emit_view_functionalization_body( f: NativeFunction, functional_op: NativeFunction ) -> str: # view op case assert f.is_view_op if f.tag is Tag.inplace_view: # This op is both an inplace op AND a view op. # See Note [Functionalization Pass - Inplace View Ops] for details. # I currently have the view meta call into the out-of-place variant of the view, to avoid # having to define an extra ~20 inplace {view}_inverse_ functions. # Most view ops don't have NativeFunctionGroup's both, because we don't define out= variants for view ops. # I'm assuming that every inplace-view op has a corresponding out-of-place view op, # with the same name but the trailing underscore removed. # This is currently asserted at parse time in gen.py (see error_check_native_functions). assert f.func.kind() is SchemaKind.inplace # Requirement: Every inplace_view op needs to have a corresponding functional view op, which we paired together beforehand. assert functional_op is not None api_name = functional_op.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(functional_op.func) else: api_name = f.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(f.func) dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) view_redispatch_args = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)] forward_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=False) reverse_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=True) # The meta API call should use the same arguments, but convert all tensors to meta tensors first. 
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig) meta_call_args = [e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)] if f.tag is Tag.inplace_view: # See Note [Functionalization Pass - Inplace View Ops] for more details return f""" at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); at::functionalization::impl::mutate_view_meta(self, view_meta); {unwrap_tensor_args_str} {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); }} // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); return self; """ else: return f""" {unwrap_tensor_args_str} {return_type} tmp_output; {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); tmp_output = at::_ops::{api_name}::redispatch({', '.join(view_redispatch_args)}); // I'm fusing the [alias removal], [mutation removal], [add views back] passes together. // Later, we'll want to turn them into separate passes (since e.g. vulkan only cares about alias removal). }} at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); return out; """ # Generates the Functionalization kernel for inplace ops def emit_inplace_functionalization_body( f: NativeFunction, functional_op: Optional[NativeFunction] ) -> str: # mutation case assert(modifies_arguments(f)) dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) maybe_return = '' if len(f.func.returns) == 0 else 'return ' sync_tensor_args = '\n '.join(mapMaybe( lambda arg: f'at::functionalization::impl::sync({arg.name});' if arg.type.is_tensor_like() else None, f.func.arguments.flat_all)) if functional_op is None: # We can't functionalize this inplace op, since we don't know what the corresponding functional op is. inplace_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)] warn_str = "Note: the functionalization pass encountered an operator ({}) that it could not functionalize, \ because it couldn't find an out-of-place equivalent of the operator to call. \ Instead, it's calling the inplace/view operator directly. 
\ If this causes problems in your program, consider upstreaming the out-of-place op to PyTorch.".format(str(f.func.name)) return f""" if (c10::impl::tls_local_dispatch_key_set().included_.has(c10::DispatchKey::Functionalize)) {{ TORCH_WARN("{warn_str}"); }} {sync_tensor_args} {unwrap_tensor_args_str} at::AutoDispatchSkipFunctionalize guard; // Redispatch as normally otherwise, since XLA has its own lowerings for special inplace ops. {maybe_return}at::_ops::{f.func.name.unambiguous_name()}::redispatch({', '.join(inplace_exprs)}); """ # call the out-of-place variant of the op functional_sig = DispatcherSignature.from_schema(functional_op.func) functional_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)] mutable_input_post_processing = '\n'.join([ f""" auto {a.name}_functional = at::functionalization::impl::unsafeGetFunctionalWrapper({a.name}); {a.name}_functional->replace_(tmp_output); {a.name}_functional->commit_update();""" for a in f.func.arguments.flat_non_out if a.annotation and a.annotation.is_write and a.type.is_tensor_like()]) return f""" {sync_tensor_args} {unwrap_tensor_args_str} {return_type} tmp_output; {{ at::AutoDispatchSkipFunctionalize guard; // The functionalization pass explicitly doesn't pass out= parameters to the redispatch tmp_output = at::_ops::{functional_op.func.name.unambiguous_name()}::redispatch({', '.join(functional_exprs)}); }} {mutable_input_post_processing} {return_str(f)};""" def emit_declaration_for_noncomposite_views(f: NativeFunction) -> str: # For every view op, we need a corresponding "inverse view" function. # This generates the declarations so we get a good compiler error when someone adds a new view. view_inverse_sig = ViewInverseSignature(f) return view_inverse_sig.decl() # The below functions generate RegisterFunctionalization.cpp # These files provide the kernels that run the functionalization pass, which can be opted into # per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch). def
gen_functionalization_registration
identifier_name
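The inplace kernels generated above rewrite a mutation as a call to the out-of-place variant followed by replace_/commit_update on each mutable input's functional wrapper. A tiny Python stand-in for that tensor-level contract, assuming a made-up FunctionalTensor class; it illustrates the write-back sequence, not the real wrapper.

class FunctionalTensor:
    """Toy wrapper: mutations become an out-of-place result staged with
    replace_() and published with commit_update()."""
    def __init__(self, value):
        self.value = value
        self._pending = None

    def replace_(self, new_value):
        self._pending = new_value       # stage the result of the functional op

    def commit_update(self):
        self.value = self._pending      # publish it back to the wrapper
        self._pending = None

def functionalized_add_(self_t, other):
    # what the generated kernel does for add_: run the functional variant...
    tmp_output = self_t.value + other
    # ...then write it back through the wrapper instead of mutating in place
    self_t.replace_(tmp_output)
    self_t.commit_update()
    return self_t

x = FunctionalTensor(3)
functionalized_add_(x, 4)
print(x.value)  # 7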
gen_functionalization_type.py
. def unwrap_tensor_args(sig: DispatcherSignature) -> Tuple[str, List[Binding]]: context: List[Binding] = [] unwrapped_tensor_args: List[str] = [] for arg in sig.arguments(): if is_tensor_like(arg.argument): # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. unwrapped_name = f'{arg.name}_' unwrapped_tensor_args.append( f'auto {unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});') context.append(arg.with_name(unwrapped_name)) else: # for non-tensor inputs, we want to pass them directly into the redispatch calls. context.append(arg) unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args) return unwrap_tensor_args_str, context # converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns: # (1) a string containing all of the logic that does the conversions. # (2) a context, to be used by translate(), with all of the relevant bindings. def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]: context: List[Binding] = [] unwrapped_tensor_args: List[str] = [] for arg in sig.arguments(): if is_tensor_like(arg.argument): # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. # for tensor inputs, we want to unwrap them before passing them into the redispatch calls. a_ = arg.name unwrapped_name = f'{arg.name}_meta' unwrapped_tensor_args.append( f"auto {unwrapped_name} = at::native::empty_strided_meta({a_}.sizes(), {a_}.strides(), \ /*dtype=*/c10::make_optional({a_}.scalar_type()), /*layout=*/c10::make_optional({a_}.layout()), \ /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);" ) context.append(arg.with_name(unwrapped_name)) else: # for non-tensor inputs, we want to pass them directly into the redispatch calls. context.append(arg) unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args) return unwrap_tensor_args_str, context # Generates the Functionalization kernel for: # - ops that create aliases (e.g. transpose()) # - ops that are views AND mutations (e.g. transpose_()) def emit_view_functionalization_body( f: NativeFunction, functional_op: NativeFunction ) -> str: # view op case
dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) view_redispatch_args = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)] forward_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=False) reverse_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=True) # The meta API call should use the same arguments, but convert all tensors to meta tensors first. meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig) meta_call_args = [e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)] if f.tag is Tag.inplace_view: # See Note [Functionalization Pass - Inplace View Ops] for more details return f""" at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); at::functionalization::impl::mutate_view_meta(self, view_meta); {unwrap_tensor_args_str} {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); }} // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); return self; """ else: return f""" {unwrap_tensor_args_str} {return_type} tmp_output; {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); tmp_output = at::_ops::{api_name}::redispatch({', '.join(view_redispatch_args)}); // I'm fusing the [alias removal], [mutation removal], [add views back] passes together. // Later, we'll want to turn them into separate passes (since e.g. vulkan only cares about alias removal). }} at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); return out; """ # Generates the Functionalization kernel for inplace ops def emit_inplace_functionalization_body( f: NativeFunction, functional_op: Optional[NativeFunction] ) -> str: # mutation case assert(modifies_arguments(f)) dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) maybe_return = '' if len(f.func.returns) == 0 else 'return ' sync_tensor_args = '\n '.join(mapMaybe( lambda arg: f'at::functionalization::impl::sync({arg.name});' if arg.type.is_tensor_like() else None, f.func.arguments.flat_all)) if functional_op is None: # We can't functionalize this inplace op, since we don't know what the corresponding functional op is. 
inplace_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)] warn_str = "Note: the functionalization pass encountered an operator ({}) that it could not functionalize, \ because it couldn't find an out-of-place equivalent of the operator to call. \ Instead, it's calling the inplace/view operator directly. \ If this causes problems in your program, consider upstreaming the out-of-place op to PyTorch.".format(str(f.func.name)) return f""" if (c10::impl::tls_local_dispatch_key_set().included_.has(c10::DispatchKey::Functionalize)) {{ TORCH_WARN("{warn_str}"); }} {sync_tensor_args} {unwrap_tensor_args_str} at::AutoDispatchSkipFunctionalize guard; // Redispatch as normally otherwise, since XLA has its own lowerings for special inplace ops. {maybe_return}at::_ops::{f.func.name.unambiguous_name()}::redispatch({', '.join(inplace_exprs)}); """ # call the out-of-place variant of the op functional_sig = DispatcherSignature.from_schema(functional_op.func) functional_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)] mutable_input_post_processing = '\n'.join([ f""" auto {a.name}_functional = at::functionalization::impl::unsafeGetFunctionalWrapper({a.name}); {a.name}_functional->replace_(tmp_output);
assert f.is_view_op if f.tag is Tag.inplace_view: # This op is both an inplace op AND a view op. # See Note [Functionalization Pass - Inplace View Ops] for details. # I currently have the view meta call into the out-of-place variant of the view, to avoid # having to define an extra ~20 inplace {view}_inverse_ functions. # Most view ops don't have NativeFunctionGroup's both, because we don't define out= variants for view ops. # I'm assuming that every inplace-view op has a corresponding out-of-place view op, # with the same name but the trailing underscore removed. # This is currently asserted at parse time in gen.py (see error_check_native_functions). assert f.func.kind() is SchemaKind.inplace # Requirement: Every inplace_view op needs to have a corresponding functional view op, which we paired together beforehand. assert functional_op is not None api_name = functional_op.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(functional_op.func) else: api_name = f.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(f.func)
identifier_body
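The generated view kernels build a ViewMeta out of a forward lambda (base -> view) and a reverse lambda (mutated view -> base) so that later mutations made through the view can be scattered back into the base. A minimal Python analogue using a list slice as the "view"; the class and lambdas are illustrative only.

class ViewMeta:
    """Toy analogue of at::functionalization::ViewMeta: a view remembers how it
    was produced (forward) and how to scatter an updated view back into its
    base (reverse)."""
    def __init__(self, forward, reverse):
        self.forward = forward
        self.reverse = reverse

# a slice view of a Python list, expressed as a forward/reverse pair
meta = ViewMeta(
    forward=lambda base: base[1:3],
    reverse=lambda base, mutated_view: base[:1] + mutated_view + base[3:],
)

base = [0, 1, 2, 3]
view = meta.forward(base)           # [1, 2]
view = [x * 10 for x in view]       # "mutate" the view out-of-place
base = meta.reverse(base, view)     # propagate the mutation back to the base
print(base)                         # [0, 10, 20, 3]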
gen_functionalization_type.py
=*/c10::make_optional({a_}.scalar_type()), /*layout=*/c10::make_optional({a_}.layout()), \ /*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);" ) context.append(arg.with_name(unwrapped_name)) else: # for non-tensor inputs, we want to pass them directly into the redispatch calls. context.append(arg) unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args) return unwrap_tensor_args_str, context # Generates the Functionalization kernel for: # - ops that create aliases (e.g. transpose()) # - ops that are views AND mutations (e.g. transpose_()) def emit_view_functionalization_body( f: NativeFunction, functional_op: NativeFunction ) -> str: # view op case assert f.is_view_op if f.tag is Tag.inplace_view: # This op is both an inplace op AND a view op. # See Note [Functionalization Pass - Inplace View Ops] for details. # I currently have the view meta call into the out-of-place variant of the view, to avoid # having to define an extra ~20 inplace {view}_inverse_ functions. # Most view ops don't have NativeFunctionGroup's both, because we don't define out= variants for view ops. # I'm assuming that every inplace-view op has a corresponding out-of-place view op, # with the same name but the trailing underscore removed. # This is currently asserted at parse time in gen.py (see error_check_native_functions). assert f.func.kind() is SchemaKind.inplace # Requirement: Every inplace_view op needs to have a corresponding functional view op, which we paired together beforehand. assert functional_op is not None api_name = functional_op.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(functional_op.func) else: api_name = f.func.name.unambiguous_name() call_sig = DispatcherSignature.from_schema(f.func) dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) view_redispatch_args = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)] forward_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=False) reverse_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=True) # The meta API call should use the same arguments, but convert all tensors to meta tensors first. 
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig) meta_call_args = [e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)] if f.tag is Tag.inplace_view: # See Note [Functionalization Pass - Inplace View Ops] for more details return f""" at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); at::functionalization::impl::mutate_view_meta(self, view_meta); {unwrap_tensor_args_str} {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); }} // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output); return self; """ else: return f""" {unwrap_tensor_args_str} {return_type} tmp_output; {return_type} reference_tensor_output; {{ at::AutoDispatchSkipFunctionalize guard; {meta_conversion_str} reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)}); tmp_output = at::_ops::{api_name}::redispatch({', '.join(view_redispatch_args)}); // I'm fusing the [alias removal], [mutation removal], [add views back] passes together. // Later, we'll want to turn them into separate passes (since e.g. vulkan only cares about alias removal). }} at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta( {forward_lambda.decl()} {{ return {forward_lambda.inner_call()} }}, {reverse_lambda.decl()} {{ return {reverse_lambda.inner_call()} }} ); auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta); // See Note [Propagating strides in the functionalization pass] at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output); return out; """ # Generates the Functionalization kernel for inplace ops def emit_inplace_functionalization_body( f: NativeFunction, functional_op: Optional[NativeFunction] ) -> str: # mutation case assert(modifies_arguments(f)) dispatcher_sig = DispatcherSignature.from_schema(f.func) keyset = 'dispatchKeySet & c10::after_func_keyset' return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type() unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig) maybe_return = '' if len(f.func.returns) == 0 else 'return ' sync_tensor_args = '\n '.join(mapMaybe( lambda arg: f'at::functionalization::impl::sync({arg.name});' if arg.type.is_tensor_like() else None, f.func.arguments.flat_all)) if functional_op is None: # We can't functionalize this inplace op, since we don't know what the corresponding functional op is. inplace_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)] warn_str = "Note: the functionalization pass encountered an operator ({}) that it could not functionalize, \ because it couldn't find an out-of-place equivalent of the operator to call. \ Instead, it's calling the inplace/view operator directly. 
\ If this causes problems in your program, consider upstreaming the out-of-place op to PyTorch.".format(str(f.func.name)) return f""" if (c10::impl::tls_local_dispatch_key_set().included_.has(c10::DispatchKey::Functionalize)) {{ TORCH_WARN("{warn_str}"); }} {sync_tensor_args} {unwrap_tensor_args_str} at::AutoDispatchSkipFunctionalize guard; // Redispatch as normally otherwise, since XLA has its own lowerings for special inplace ops. {maybe_return}at::_ops::{f.func.name.unambiguous_name()}::redispatch({', '.join(inplace_exprs)}); """ # call the out-of-place variant of the op functional_sig = DispatcherSignature.from_schema(functional_op.func) functional_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)] mutable_input_post_processing = '\n'.join([ f""" auto {a.name}_functional = at::functionalization::impl::unsafeGetFunctionalWrapper({a.name}); {a.name}_functional->replace_(tmp_output); {a.name}_functional->commit_update();""" for a in f.func.arguments.flat_non_out if a.annotation and a.annotation.is_write and a.type.is_tensor_like()]) return f""" {sync_tensor_args} {unwrap_tensor_args_str} {return_type} tmp_output; {{ at::AutoDispatchSkipFunctionalize guard; // The functionalization pass explicitly doesn't pass out= parameters to the redispatch tmp_output = at::_ops::{functional_op.func.name.unambiguous_name()}::redispatch({', '.join(functional_exprs)}); }} {mutable_input_post_processing} {return_str(f)};""" def emit_declaration_for_noncomposite_views(f: NativeFunction) -> str: # For every view op, we need a corresponding "inverse view" function. # This generates the declarations so we get a good compiler error when someone adds a new view. view_inverse_sig = ViewInverseSignature(f) return view_inverse_sig.decl() # The below functions generate RegisterFunctionalization.cpp # These files provide the kernels that run the functionalization pass, which can be opted into # per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch). def gen_functionalization_registration( selector: SelectiveBuilder, f: NativeFunction, composite_implicit_autograd_index: BackendIndex ) -> Optional[str]: @with_native_function def emit_registration_helper(f: NativeFunction) -> Optional[str]: # Note: for now, this logic is meant to avoid registering functionalization kernels for mobile. # At some point, Vulkan we'll want to use functionalization and we'll need to change this. if not selector.include_all_operators: return None if not f.is_view_op and not modifies_arguments(f):
return None
conditional_block
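emit_registration_helper in the record above skips ops that are neither views nor mutations and bails out entirely for selective (mobile) builds. A toy Python filter with the same shape; the m.impl registration string it returns is purely illustrative and not the generator's actual output.

def registration_entry(op_name, is_view_op, modifies_arguments, include_all_operators=True):
    """Return a registration line only for ops the functionalization pass cares
    about; everything else (and all selective builds) yields None."""
    if not include_all_operators:
        return None                      # mobile/selective builds register nothing
    if not is_view_op and not modifies_arguments:
        return None                      # nothing to functionalize for this op
    return f'm.impl("{op_name}", TORCH_FN(functionalization::{op_name}));'

print(registration_entry("transpose", True, False))
print(registration_entry("relu", False, False))  # None: neither a view nor a mutation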
iterative_gmm.py
: def __init__(self): self.XY = None def plot_cov(self,means, covariances,ct): if ct == 'spherical': return color_iter = itertools.cycle(['navy', 'navy', 'cornflowerblue', 'gold', 'darkorange']) ax =plt.gca() for i, (mean, covar, color) in enumerate(zip( means, covariances, color_iter)): v, w = linalg.eigh(covar) v = 2. * np.sqrt(2.) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180. * angle / np.pi # convert to degrees alpha = 0.2 ell = Ellipse(mean, v[0], v[1], 180. + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(alpha) ax.add_artist(ell) ell = Ellipse(mean, v[0]*4, v[1]*2, 180. + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(alpha) ell = Ellipse(mean, v[0]*2, v[1]*2, 180. + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(alpha) ax.add_artist(ell) # multi_normal = multivariate_normal(mean[0:2],u[0:2]) # ax.contour(np.sort(X[:,0]),np.sort(X[:,1]), # multi_normal.pdf(self.XY).reshape(X.shape[0],X.shape[0]), # colors='black',alpha=0.3) ax.scatter(mean[0],mean[1],c='grey',zorder=10,s=100) def iterative_gmm(self,dataset = 'bb',fake = True,mode = 'gmm',binary = False,im_dir = './images/',savegif = False,title ='temp',bic_thresh = 0,maxiter = 40,nc =5,v_and_1 = False,thresh = 0.9,cov=[],n_components=2,covt='spherical',ra=False,pca = True): ''' dataset: string The filename of the material something like 'bb','pp' fake: bool Whether or not the data is fake, if it is not it will be cropped mode: str 'fraction' will reduce the input to a combination of the relative signals e.g. bin1 - bin0/sum binary: bool Whether or not to show the output as binary or not nc: int pca components ''' # Clear the imagedir if savegif: folder = im_dir for the_file in os.listdir(folder): file_path = os.path.join(folder, the_file) try: if os.path.isfile(file_path): os.unlink(file_path) #elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) if ra: arrowsU = [] arrowsV = [] bic0 = np.infty itern = 0 inds = [2,3,4] label_true = loadmat('2'+dataset+'_mask')['BW'] X1 = loadmat('all'+dataset)['Z'][:,inds] if not fake: X1 = X1[400:,:].copy() label_true = label_true[400:].copy() length = 48 else: length = 68 # This is code for just looking at the ratio of the bins if mode == 'fraction': # initialize vector for fraction X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))]) result = [x for x in itertools.combinations(np.arange(5),2)] for jj in range(0,X2.shape[1]): r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F") X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ind = np.argsort(np.mean(X1,0)) # X1 = X2[:,ind[:4]] else: for jj in range(0,X1.shape[1]): r2 = np.reshape(X1[:,jj], (20,length), order="F") X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ct = 'full' if mode == 'bgmm': bgmm = mixture.BayesianGaussianMixture( n_components=n_components, covariance_type=covt) elif mode == 'kmeans': km = KMeans(n_clusters=n_components) gmm = mixture.GaussianMixture(n_components=n_components, covariance_type=covt) gmm1 = mixture.GaussianMixture(n_components=n_components, covariance_type='full') ims = [] pca = True # Do the PCA decomposition if pca: X1 = PCA(n_components=nc).fit_transform(X1) X3 = X1[:,0:2].copy() fig = plt.figure(figsize=(10,10)) bics = [] for ii in range(0,maxiter): X = X1.copy() if mode == 'gmm': y_pred = gmm.fit_predict(X) elif mode 
== 'bgmm': y_pred = bgmm.fit_predict(X) y_ff = gmm.fit(X) elif mode == 'kmeans': y_pred = km.fit_predict(X) y_ff = gmm.fit(X) y_ff1 = gmm1.fit(X) # if I should show the vmeasure if v_and_1: homo1,comp1,vs1 = homogeneity_completeness_v_measure( label_true.squeeze(), y_pred) bic = gmm.aic(X) bic1 = gmm1.aic(X) print(vs1,itern,bic,bic1) else: bic = gmm.aic(X) print(bic) # Stop if bic is lower if bic - bic0 < bic_thresh: bic0 = bic else: print('BIC got higher') break print(bic) # map the bad values to zero for kk in range(n_components): temp = X[y_pred == kk,:] if cov == 'robust': robust_cov = MinCovDet().fit(temp) else: robust_cov = EmpiricalCovariance().fit(temp) # Calculating the mahal distances robust_mahal = robust_cov.mahalanobis( temp - robust_cov.location_) ** (0.33) if thresh < 1: temp[robust_mahal > robust_mahal.max()*thresh] = 0 else: # import pdb; pdb.set_trace() temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0 X[y_pred == kk,:] = temp mask_one = X[:,1] == 0 if y_pred[3] == 0: # Map top to zero if it is the wrong combo y_pred = y_pred + 1 y_pred[y_pred == n_components] = 0 m_reshape = np.reshape(mask_one, (20,length), order="F") if itern == 0: y_0 = y_pred # Plotting functions ax0 = fig.add_subplot(111) a = -(y_pred - label_true.squeeze()) y_reshape = np.reshape(a, (20,length), order="F") colorz = ['b','r','g','m'] for jj,color in zip(range(a.min(),a.max()+1),colorz): print(jj) b = a == jj b = [i for i, x in enumerate(b) if x] if jj == 0: c = b ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())]) ax0.set_title('New Method') self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full') if itern == 0: axes = plt.gca() ylim = axes.get_ylim() xlim = axes.get_xlim() ax0.set_xlim(xlim) ax0.set_ylim(ylim) ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1) plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format
I_gmm
identifier_name
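Inside the iteration loop above, each cluster's covariance is fit (robustly with MinCovDet, or with EmpiricalCovariance), the cube root of the Mahalanobis distances is taken, and the most distant points are zeroed out. A standalone Python sketch of that per-cluster masking step; the function signature and threshold handling are simplified relative to the original method, and distances are computed against the fitted location.

import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

def mask_outliers(X, labels, k, thresh=0.9, robust=True):
    """Zero out the points of cluster `k` whose (cube-rooted) Mahalanobis
    distance exceeds `thresh` times the cluster maximum. Returns a copy of X."""
    X = X.copy()
    pts = X[labels == k, :]
    cov = MinCovDet().fit(pts) if robust else EmpiricalCovariance().fit(pts)
    mahal = cov.mahalanobis(pts) ** 0.33
    pts[mahal > mahal.max() * thresh] = 0     # drop the worst-fitting points
    X[labels == k, :] = pts
    return X

# usage on synthetic data: at least the single most distant point gets zeroed
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
labels = np.zeros(200, dtype=int)
X_masked = mask_outliers(X, labels, k=0)
print((X_masked == 0).all(axis=1).sum(), "points zeroed")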
iterative_gmm.py
else: length = 68 # This is code for just looking at the ratio of the bins if mode == 'fraction': # initialize vector for fraction X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))]) result = [x for x in itertools.combinations(np.arange(5),2)] for jj in range(0,X2.shape[1]): r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F") X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ind = np.argsort(np.mean(X1,0)) # X1 = X2[:,ind[:4]] else: for jj in range(0,X1.shape[1]): r2 = np.reshape(X1[:,jj], (20,length), order="F") X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ct = 'full' if mode == 'bgmm': bgmm = mixture.BayesianGaussianMixture( n_components=n_components, covariance_type=covt) elif mode == 'kmeans': km = KMeans(n_clusters=n_components) gmm = mixture.GaussianMixture(n_components=n_components, covariance_type=covt) gmm1 = mixture.GaussianMixture(n_components=n_components, covariance_type='full') ims = [] pca = True # Do the PCA decomposition if pca: X1 = PCA(n_components=nc).fit_transform(X1) X3 = X1[:,0:2].copy() fig = plt.figure(figsize=(10,10)) bics = [] for ii in range(0,maxiter): X = X1.copy() if mode == 'gmm': y_pred = gmm.fit_predict(X) elif mode == 'bgmm': y_pred = bgmm.fit_predict(X) y_ff = gmm.fit(X) elif mode == 'kmeans': y_pred = km.fit_predict(X) y_ff = gmm.fit(X) y_ff1 = gmm1.fit(X) # if I should show the vmeasure if v_and_1: homo1,comp1,vs1 = homogeneity_completeness_v_measure( label_true.squeeze(), y_pred) bic = gmm.aic(X) bic1 = gmm1.aic(X) print(vs1,itern,bic,bic1) else: bic = gmm.aic(X) print(bic) # Stop if bic is lower if bic - bic0 < bic_thresh: bic0 = bic else: print('BIC got higher') break print(bic) # map the bad values to zero for kk in range(n_components): temp = X[y_pred == kk,:] if cov == 'robust': robust_cov = MinCovDet().fit(temp) else: robust_cov = EmpiricalCovariance().fit(temp) # Calculating the mahal distances robust_mahal = robust_cov.mahalanobis( temp - robust_cov.location_) ** (0.33) if thresh < 1: temp[robust_mahal > robust_mahal.max()*thresh] = 0 else: # import pdb; pdb.set_trace() temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0 X[y_pred == kk,:] = temp mask_one = X[:,1] == 0 if y_pred[3] == 0: # Map top to zero if it is the wrong combo y_pred = y_pred + 1 y_pred[y_pred == n_components] = 0 m_reshape = np.reshape(mask_one, (20,length), order="F") if itern == 0: y_0 = y_pred # Plotting functions ax0 = fig.add_subplot(111) a = -(y_pred - label_true.squeeze()) y_reshape = np.reshape(a, (20,length), order="F") colorz = ['b','r','g','m'] for jj,color in zip(range(a.min(),a.max()+1),colorz): print(jj) b = a == jj b = [i for i, x in enumerate(b) if x] if jj == 0: c = b ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())]) ax0.set_title('New Method') self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full') if itern == 0: axes = plt.gca() ylim = axes.get_ylim() xlim = axes.get_xlim() ax0.set_xlim(xlim) ax0.set_ylim(ylim) ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1) plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03"))) ax3 = plt.axes([.22, .15, .15, .1]) bics.append(bic) plt.plot(bics) plt.yticks([]) plt.xlabel('iteration') plt.ylabel('BIC') ax2 = plt.axes([.25, .55, .6, .4], facecolor='y') if binary: plt.imshow(y_reshape,cmap='brg') else: plt.imshow(np.reshape(X1[:,0], (20,length), order="F")) plt.title('Image Space') plt.xticks([]) plt.yticks([]) if savegif: plt.savefig(im_dir + 
'{}.png'.format(format(itern, "02"))) itern += 1 i, j = np.where(m_reshape == True) # if binary: # plt.imshow(y_reshape,cmap='brg') # else: plt.scatter(j,i,marker='x',c='k') # import pdb; pdb.set_trace() d = [i for i, x in enumerate(mask_one) if x] ax0.scatter(X1[d,0],X1[d,1],marker='x',c='k') if savegif: plt.savefig(im_dir + '{}.png'.format(format(itern, "02"))) itern += 1 X2 = X1.copy() # Inpainting the zeros r2 = np.reshape(X1, (20,length,X1.shape[1]), order="F") X1 = np.reshape(inpaint.inpaint_biharmonic( r2,m_reshape,multichannel=True), (20*length,X1.shape[1]),order="F") ax0.plot([X2[d,0],X1[d,0]],[X2[d,1],X1[d,1]],'r') if ra: arrowsU.append([X2[d,0],X1[d,0]]) arrowsV.append([X2[d,1],X1[d,1]]) if binary: plt.imshow(y_reshape,cmap='brg') else: plt.imshow(np.reshape(X1[:,0], (20,length), order="F")) if savegif: plt.savefig(im_dir + '{}.png'.format(format(itern, "02"))) plt.clf() # X_old = X.copy() # np.save('bb',y_reshape) # plt.figure() # robust_mahal1.sort() # plt.plot(robust_mahal1) # plt.plot(250,robust_mahal1.max()*.87,'r*') # plt.savefig('./images2/{}.png'.format(format(itern, "02"))) # plt.figure() # robust_mahal2.sort() # plt.plot(robust_mahal2) # plt.plot(250,robust_mahal2.max()*.87,'r*') # plt.savefig('./images3/{}.png'.format(format(itern, "02"))) itern += 1 fig = plt.figure(figsize=(10,10)) ax0 = fig.add_subplot(111) colorz = ['b','r','g','m'] for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj) b = a == jj b = [i for i, x in enumerate(b) if x] if jj == 0: c = b ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
conditional_block
iterative_gmm.py
ell = Ellipse(mean, v[0]*4, v[1]*2, 180. + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(alpha) ell = Ellipse(mean, v[0]*2, v[1]*2, 180. + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(alpha) ax.add_artist(ell) # multi_normal = multivariate_normal(mean[0:2],u[0:2]) # ax.contour(np.sort(X[:,0]),np.sort(X[:,1]), # multi_normal.pdf(self.XY).reshape(X.shape[0],X.shape[0]), # colors='black',alpha=0.3) ax.scatter(mean[0],mean[1],c='grey',zorder=10,s=100) def iterative_gmm(self,dataset = 'bb',fake = True,mode = 'gmm',binary = False,im_dir = './images/',savegif = False,title ='temp',bic_thresh = 0,maxiter = 40,nc =5,v_and_1 = False,thresh = 0.9,cov=[],n_components=2,covt='spherical',ra=False,pca = True): ''' dataset: string The filename of the material something like 'bb','pp' fake: bool Whether or not the data is fake, if it is not it will be cropped mode: str 'fraction' will reduce the input to a combination of the relative signals e.g. bin1 - bin0/sum binary: bool Whether or not to show the output as binary or not nc: int pca components ''' # Clear the imagedir if savegif: folder = im_dir for the_file in os.listdir(folder): file_path = os.path.join(folder, the_file) try: if os.path.isfile(file_path): os.unlink(file_path) #elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) if ra: arrowsU = [] arrowsV = [] bic0 = np.infty itern = 0 inds = [2,3,4] label_true = loadmat('2'+dataset+'_mask')['BW'] X1 = loadmat('all'+dataset)['Z'][:,inds] if not fake: X1 = X1[400:,:].copy() label_true = label_true[400:].copy() length = 48 else: length = 68 # This is code for just looking at the ratio of the bins if mode == 'fraction': # initialize vector for fraction X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))]) result = [x for x in itertools.combinations(np.arange(5),2)] for jj in range(0,X2.shape[1]): r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F") X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ind = np.argsort(np.mean(X1,0)) # X1 = X2[:,ind[:4]] else: for jj in range(0,X1.shape[1]): r2 = np.reshape(X1[:,jj], (20,length), order="F") X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ct = 'full' if mode == 'bgmm': bgmm = mixture.BayesianGaussianMixture( n_components=n_components, covariance_type=covt) elif mode == 'kmeans': km = KMeans(n_clusters=n_components) gmm = mixture.GaussianMixture(n_components=n_components, covariance_type=covt) gmm1 = mixture.GaussianMixture(n_components=n_components, covariance_type='full') ims = [] pca = True # Do the PCA decomposition if pca: X1 = PCA(n_components=nc).fit_transform(X1) X3 = X1[:,0:2].copy() fig = plt.figure(figsize=(10,10)) bics = [] for ii in range(0,maxiter): X = X1.copy() if mode == 'gmm': y_pred = gmm.fit_predict(X) elif mode == 'bgmm': y_pred = bgmm.fit_predict(X) y_ff = gmm.fit(X) elif mode == 'kmeans': y_pred = km.fit_predict(X) y_ff = gmm.fit(X) y_ff1 = gmm1.fit(X) # if I should show the vmeasure if v_and_1: homo1,comp1,vs1 = homogeneity_completeness_v_measure( label_true.squeeze(), y_pred) bic = gmm.aic(X) bic1 = gmm1.aic(X) print(vs1,itern,bic,bic1) else: bic = gmm.aic(X) print(bic) # Stop if bic is lower if bic - bic0 < bic_thresh: bic0 = bic else: print('BIC got higher') break print(bic) # map the bad values to zero for kk in range(n_components): temp = X[y_pred == kk,:] if cov == 'robust': robust_cov = MinCovDet().fit(temp) else: robust_cov = 
EmpiricalCovariance().fit(temp) # Calculating the mahal distances robust_mahal = robust_cov.mahalanobis( temp - robust_cov.location_) ** (0.33) if thresh < 1: temp[robust_mahal > robust_mahal.max()*thresh] = 0 else: # import pdb; pdb.set_trace() temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0 X[y_pred == kk,:] = temp mask_one = X[:,1] == 0 if y_pred[3] == 0: # Map top to zero if it is the wrong combo y_pred = y_pred + 1 y_pred[y_pred == n_components] = 0 m_reshape = np.reshape(mask_one, (20,length), order="F") if itern == 0: y_0 = y_pred # Plotting functions ax0 = fig.add_subplot(111) a = -(y_pred - label_true.squeeze()) y_reshape = np.reshape(a, (20,length), order="F") colorz = ['b','r','g','m'] for jj,color in zip(range(a.min(),a.max()+1),colorz): print(jj) b = a == jj b = [i for i, x in enumerate(b) if x] if jj == 0: c = b ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())]) ax0.set_title('New Method') self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full') if itern == 0: axes = plt.gca() ylim = axes.get_ylim() xlim = axes.get_xlim() ax0.set_xlim(xlim) ax0.set_ylim(ylim) ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1) plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03"))) ax3 = plt.axes([.22, .15, .15, .1]) bics.append
if ct == 'spherical': return color_iter = itertools.cycle(['navy', 'navy', 'cornflowerblue', 'gold', 'darkorange']) ax =plt.gca() for i, (mean, covar, color) in enumerate(zip( means, covariances, color_iter)): v, w = linalg.eigh(covar) v = 2. * np.sqrt(2.) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180. * angle / np.pi # convert to degrees alpha = 0.2 ell = Ellipse(mean, v[0], v[1], 180. + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(alpha) ax.add_artist(ell)
identifier_body
iterative_gmm.py
grey',zorder=10,s=100) def iterative_gmm(self,dataset = 'bb',fake = True,mode = 'gmm',binary = False,im_dir = './images/',savegif = False,title ='temp',bic_thresh = 0,maxiter = 40,nc =5,v_and_1 = False,thresh = 0.9,cov=[],n_components=2,covt='spherical',ra=False,pca = True): ''' dataset: string The filename of the material something like 'bb','pp' fake: bool Whether or not the data is fake, if it is not it will be cropped mode: str 'fraction' will reduce the input to a combination of the relative signals e.g. bin1 - bin0/sum binary: bool Whether or not to show the output as binary or not nc: int pca components ''' # Clear the imagedir if savegif: folder = im_dir for the_file in os.listdir(folder): file_path = os.path.join(folder, the_file) try: if os.path.isfile(file_path): os.unlink(file_path) #elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) if ra: arrowsU = [] arrowsV = [] bic0 = np.infty itern = 0 inds = [2,3,4] label_true = loadmat('2'+dataset+'_mask')['BW'] X1 = loadmat('all'+dataset)['Z'][:,inds] if not fake: X1 = X1[400:,:].copy() label_true = label_true[400:].copy() length = 48 else: length = 68 # This is code for just looking at the ratio of the bins if mode == 'fraction': # initialize vector for fraction X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))]) result = [x for x in itertools.combinations(np.arange(5),2)] for jj in range(0,X2.shape[1]): r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F") X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ind = np.argsort(np.mean(X1,0)) # X1 = X2[:,ind[:4]] else: for jj in range(0,X1.shape[1]): r2 = np.reshape(X1[:,jj], (20,length), order="F") X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F") ct = 'full' if mode == 'bgmm': bgmm = mixture.BayesianGaussianMixture( n_components=n_components, covariance_type=covt) elif mode == 'kmeans': km = KMeans(n_clusters=n_components) gmm = mixture.GaussianMixture(n_components=n_components, covariance_type=covt) gmm1 = mixture.GaussianMixture(n_components=n_components, covariance_type='full') ims = [] pca = True # Do the PCA decomposition if pca: X1 = PCA(n_components=nc).fit_transform(X1) X3 = X1[:,0:2].copy() fig = plt.figure(figsize=(10,10)) bics = [] for ii in range(0,maxiter): X = X1.copy() if mode == 'gmm': y_pred = gmm.fit_predict(X) elif mode == 'bgmm': y_pred = bgmm.fit_predict(X) y_ff = gmm.fit(X) elif mode == 'kmeans': y_pred = km.fit_predict(X) y_ff = gmm.fit(X) y_ff1 = gmm1.fit(X) # if I should show the vmeasure if v_and_1: homo1,comp1,vs1 = homogeneity_completeness_v_measure( label_true.squeeze(), y_pred) bic = gmm.aic(X) bic1 = gmm1.aic(X) print(vs1,itern,bic,bic1) else: bic = gmm.aic(X) print(bic) # Stop if bic is lower if bic - bic0 < bic_thresh: bic0 = bic else: print('BIC got higher') break print(bic) # map the bad values to zero for kk in range(n_components): temp = X[y_pred == kk,:] if cov == 'robust': robust_cov = MinCovDet().fit(temp) else: robust_cov = EmpiricalCovariance().fit(temp) # Calculating the mahal distances robust_mahal = robust_cov.mahalanobis( temp - robust_cov.location_) ** (0.33) if thresh < 1: temp[robust_mahal > robust_mahal.max()*thresh] = 0 else: # import pdb; pdb.set_trace() temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0 X[y_pred == kk,:] = temp mask_one = X[:,1] == 0 if y_pred[3] == 0: # Map top to zero if it is the wrong combo y_pred = y_pred + 1 y_pred[y_pred == n_components] = 
0 m_reshape = np.reshape(mask_one, (20,length), order="F") if itern == 0: y_0 = y_pred # Plotting functions ax0 = fig.add_subplot(111)
colorz = ['b','r','g','m'] for jj,color in zip(range(a.min(),a.max()+1),colorz): print(jj) b = a == jj b = [i for i, x in enumerate(b) if x] if jj == 0: c = b ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())]) ax0.set_title('New Method') self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full') if itern == 0: axes = plt.gca() ylim = axes.get_ylim() xlim = axes.get_xlim() ax0.set_xlim(xlim) ax0.set_ylim(ylim) ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1) plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03"))) ax3 = plt.axes([.22, .15, .15, .1]) bics.append(bic) plt.plot(bics) plt.yticks([]) plt.xlabel('iteration') plt.ylabel('BIC') ax2 = plt.axes([.25, .55, .6, .4], facecolor='y') if binary: plt.imshow(y_reshape,cmap='brg') else: plt.imshow(np.reshape(X1[:,0], (20,length), order="F")) plt.title('Image Space') plt.xticks([]) plt.yticks([]) if savegif: plt.savefig(im_dir + '{}.png'.format(format(itern, "02"))) itern += 1 i, j = np.where(m_reshape == True) # if binary: # plt.imshow(y_reshape,cmap='brg') # else: plt.scatter(j,i,marker='x',c='k') # import pdb; pdb.set_trace() d = [i for i, x in enumerate(mask_one) if x] ax0.scatter(X1[d,0],X1[d,1],marker='x',c='k') if savegif: plt.savefig(im_dir + '{}.png'.format(format(itern, "02"))) itern += 1 X2 = X1.copy() # Inpainting the zeros r2 = np.reshape(X1, (20,length,X1.shape[1]), order="F") X1 = np.reshape(inpaint.inpaint_biharmonic( r2,m_reshape,multichannel=True), (20*length,X1.shape[1]),order="F") ax0.plot([X2[d,0],X1[d,0]],[X2[d,1],X1[d,1]],'r') if ra: arrows
a = -(y_pred - label_true.squeeze()) y_reshape = np.reshape(a, (20,length), order="F")
random_line_split
error_format.rs
of type String. Usage: string.ends_with_regex(\"regex\")"; pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed]"; pub const ERROR_STRING_SPLIT: &str = "[split] takes one parameter of type String. Usage: string.split(\"separator\")"; pub const ERROR_STRING_MATCH_REGEX: &str = "[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")"; pub const ERROR_STRING_POW: &str = "[pow] takes one parameter of type Float or Int. Usage: string.pow(number)"; pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true' "; pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true' to check it"; pub const ERROR_STRING_RHS: &str = "rhs must be of type string"; pub const ERROR_SLICE_ARG_INT: &str = ".slice(start, optional<end>) args need to be of type Integer"; pub const ERROR_SLICE_ARG_LEN: &str = ".slice(start, optional<end>) args need to be inferior to the string length"; pub const ERROR_SLICE_ARG2: &str = ".slice(start, optional<end>) end need to be superior to start in value ex: .slice(2, 5)"; pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String"; // #### Array pub const ERROR_ARRAY_TYPE: &str = "value must be of type array"; pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist"; pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int"; pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Udage: array[1]"; pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()"; pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is "; pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty"; pub const ERROR_ARRAY_INSERT_AT: &str = "[insert_at] takes two arguments. Usage: array.insert_at(1, elem)"; pub const ERROR_ARRAY_INSERT_AT_INT: &str = "[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)"; pub const ERROR_ARRAY_REMOVE_AT: &str = "[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) "; pub const ERROR_ARRAY_JOIN: &str = "[join] takes one parameter of type String. Usage: array.join(\"elem\") "; pub const ERROR_ARRAY_INDEX_OF: &str = "[index_of] takes one parameter. Usage: array.index_of(elem)"; pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. 
Usage: array.find(elem)"; pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array"; // #### CRYPTO OBJECT // ## HMAC and HASH OBJECT pub const ERROR_HASH: &str = "Crypto(string) command expect argument of type String"; pub const ERROR_HASH_ALGO: &str = "Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512"; pub const ERROR_HMAC_KEY: &str = "HMAC key need to be of type string"; pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is use incorrectly"; pub const ERROR_DIGEST_ALGO: &str = "Invalid Digest Algorithm, supported Algorithms are hex, base64"; // #### JWT OBJECT pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512"; pub const ERROR_JWT_SECRET: &str = "secret must be of type String"; pub const ERROR_JWT_SIGN_CLAIMS: &str = "JWT(claims) command expect argument 'claims' of type Object"; pub const ERROR_JWT_SIGN_ALGO: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expect first argument 'algo' of type String"; pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expect second argument 'claims' of type String"; pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expect argument 'jwt' of type String"; pub const ERROR_JWT_DECODE_ALGO: &str = "JWT(jwt).decode(algo, secret) expect first argument 'algo' of type String"; pub const ERROR_JWT_DECODE_SECRET: &str = "JWT(jwt).decode(algo, secret) expect second argument 'claims' of type String"; pub const ERROR_JWT_VALIDATION_CLAIMS: &str = "JWT(jwt).verify(claims, algo, secret) expect first argument 'claims' of type Object"; pub const ERROR_JWT_VALIDATION_ALGO: &str = "JWT(jwt).verify(claims, algo, secret) expect second argument 'algo' of type String"; pub const ERROR_JWT_VALIDATION_SECRETE: &str = "JWT(jwt).verify(claims, algo, secret) expect third argument 'secrete' of type String"; // #### HTTP OBJECT pub const ERROR_HTTP_SET: &str = "[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )"; pub const ERROR_HTTP_QUERY: &str = "[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )"; pub const ERROR_HTTP_SEND: &str = "[send] HTTP Object is bad formatted read doc for correct usage"; pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP"; // #### OBJECT pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object"; pub const ERROR_OBJECT_GET: &str = "key does not exist"; pub const ERROR_OBJECT_CONTAINS: &str = "[contains] takes one argument of type String. Usage: object.contains(\"key\")"; pub const ERROR_OBJECT_GET_GENERICS: &str = "[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")"; pub const ERROR_OBJECT_INSERT: &str = "[insert] take tow arguments. Usage: object.insert(string, any_type)"; pub const ERROR_OBJECT_ASSIGN: &str = "[assign] take one argument. Usage: object.assign({\"key\": \"value\"})"; pub const ERROR_OBJECT_REMOVE: &str = "[remove] takes one argument of type String. Usage: object.remove(\"key\")"; pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String"; pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object"; // #### METHODS pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named"; pub const ERROR_OPS: &str = "[!] Ops: Illegal operation"; pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero"; pub const ERROR_OPS_DIV_FLOAT: &str = "[!] 
Float: Division by zero"; pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:"; pub const OVERFLOWING_OPERATION: &str = "overflowing operation:"; //////////////////////////////////////////////////////////////////////////////// // PRiVTE FUNCTION //////////////////////////////////////////////////////////////////////////////// fn add_context_to_error_message<'a>( flow_slice: Span<'a>, message: String, line_number: u32, column: usize, offset: usize, ) -> String { use std::fmt::Write; let mut result = String::new(); let prefix = &flow_slice.fragment().as_bytes()[..offset]; // Find the line that includes the subslice: // Find the *last* newline before the substring starts let line_begin = prefix .iter() .rev() .position(|&b| b == b'\n') .map(|pos| offset - pos) .unwrap_or(0); // Find the full line after that newline let line = flow_slice.fragment()[line_begin..] .lines() .next() .unwrap_or(&flow_slice.fragment()[line_begin..]) .trim_end(); write!( &mut result, "at line {line_number},\n\ {line}\n\ {caret:>column$}\n\ {context}\n\n", line_number = line_number, context = message, line = line, caret = '^', column = column, ) // Because `write!` to a `String` is infallible, this `unwrap` is fine. .unwrap(); result } //////////////////////////////////////////////////////////////////////////////// // PUBLIC FUNCTION //////////////////////////////////////////////////////////////////////////////// pub fn gen_error_info(position: Position, message: String) -> ErrorInfo { ErrorInfo::new(position, message) } pub fn gen_warning_info(position: Position, message: String) -> Warnings { Warnings { position, message } } pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E> where E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{ Err::Error(E::add_context( span, error, E::from_error_kind(span, ErrorKind::Tag), )) }
identifier_body
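The add_context_to_error_message helper in the Rust cell above scans backward for the last newline before the error offset, extracts that line, and prints "at line N" with a caret right-aligned under the reported column, followed by the message. The same idea, re-expressed as a minimal Go sketch (caretMessage and the sample source string are illustrative only, and lineNumber/column are assumed to be supplied by the parser, as in the original):

package main

import (
	"fmt"
	"strings"
)

// caretMessage re-expresses, in Go, the idea behind the Rust helper
// add_context_to_error_message shown above: find the line containing the
// byte offset of the error, then print the line number, the offending
// line, and a caret right-aligned under the reported column, followed by
// the contextual message.
func caretMessage(src string, offset, lineNumber, column int, context string) string {
	// Last newline strictly before the offset; +1 lands on the line start
	// (and on 0 when there is no earlier newline).
	lineBegin := strings.LastIndexByte(src[:offset], '\n') + 1
	line := src[lineBegin:]
	if end := strings.IndexByte(line, '\n'); end >= 0 {
		line = line[:end]
	}
	line = strings.TrimRight(line, " \t\r")
	// %*c right-aligns the caret in a field of width `column`, matching
	// the Rust format string "{caret:>column$}".
	return fmt.Sprintf("at line %d,\n%s\n%*c\n%s\n\n", lineNumber, line, column, '^', context)
}

func main() {
	src := "say \"hello\"\nsay $oops\n"
	fmt.Print(caretMessage(src, 16, 2, 5, "unknown variable"))
}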
error_format.rs
= "Url component expects one argument of type string and 2 optional string arguments: text, title. Example: Url(\"hola\", text = \"text\", title = \"title\")"; pub const ERROR_VIDEO: &str = "Video component expects one argument of type string. Example: Video(url = \"hola\")"; pub const ERROR_AUDIO: &str = "Audio component expects one argument of type string. Example: Audio(url = \"hola\")"; pub const ERROR_FILE: &str = "File component expects one argument of type string. Example: File(url = \"hola\")"; pub const ERROR_HTTP_GET_VALUE: &str = "not found in HTTP object. Use the HTTP() builtin to construct the correct object to make HTTP calls"; pub const ERROR_HTTP_QUERY_VALUES: &str = "must have a value of type String. Example: {key: \"value\"}"; pub const ERROR_HTTP: &str = "HTTP builtin expects one url of type string. Example: HTTP(\"https://clevy.io\")"; pub const ERROR_JWT: &str = "JWT builtin expects payload as argument. Example: JWT({ \"user\": \"name\", \"somekey\": { \"somevalue\": 42 }, \"exp\": 1618064023, \"iss\": \"CSML STUDIO\" })"; pub const ERROR_SMTP: &str = "SMTP builtin expects SMTP Server Address. Example: SMTP(\"smtp.gmail.com\")"; pub const ERROR_CRYPTO: &str = "CRYPTO builtin expects one argument of type string. Example: CRYPTO(\"text\")"; pub const ERROR_BUILTIN_UNKNOWN: &str = "Unknown builtin"; // ### native Components pub const ERROR_HTTP_NOT_DATA: &str = "bad format: no 'data' in HTTP response"; pub const ERROR_NATIVE_COMPONENT: &str = "native component does not exist"; // ### Constants pub const ERROR_CONSTANT_MUTABLE_FUNCTION: &str = "Invalid operation constants can not execute self mutable functions"; pub const ERROR_INVALID_CONSTANT_EXPR: &str = "Constant invalid expression type: constants can not be assign this type of expression"; // ### Primitives // #### Indexing pub const ERROR_INDEXING: &str = "indexing can only be done in ARRAY, OBJECT or STRING primitive types"; // #### Closure pub const ERROR_CLOSURE_UNKNOWN_METHOD: &str = "Closure don't have methods"; // #### Boolean pub const ERROR_BOOLEAN_UNKNOWN_METHOD: &str = "is not a method of Boolean"; // #### NUMBER pub const ERROR_NUMBER_POW: &str = "[pow] takes one parameter of type int or float usage: number.pow(42)"; // #### Float pub const ERROR_FLOAT_UNKNOWN_METHOD: &str = "is not a method of Float"; // #### Int pub const ERROR_INT_UNKNOWN_METHOD: &str = "is not a method of Int"; // #### Null pub const ERROR_NULL_UNKNOWN_METHOD: &str = "is not a method of Null"; // #### String pub const ERROR_STRING_DO_MATCH: &str = "[do_match] takes one parameter of type String. Usage: string.do_match(\"tag\")"; pub const ERROR_STRING_APPEND: &str = "[append] takes one parameter of type String. Usage: string.append(\"text to append\")"; pub const ERROR_STRING_CONTAINS: &str = "[contains] takes one parameter of type String. Usage: string.contains(\"word\")"; pub const ERROR_STRING_REPLACE: &str = "[replace] takes tow parameter of type String. Usage: \"this is old\".replace(\"old\", \"new\")"; pub const ERROR_STRING_REPLACE_ALL: &str = "[replace_all] takes tow parameter of type String. Usage: \"old old old old\".replace_all(\"old\", \"new\")"; pub const ERROR_STRING_REPLACE_REGEX: &str = "[replace_regex] takes tow parameter of type String. Usage: \"hello world\".replace_regex(\"world\", \"Clevy\")"; pub const ERROR_STRING_CONTAINS_REGEX: &str = "[contains_regex] takes one parameter of type String. 
Usage: string.contains_regex(\"regex\")"; pub const ERROR_STRING_VALID_REGEX: &str = "parameter must be a valid regex expression"; // link to docs pub const ERROR_STRING_START_WITH: &str = "[starts_with] takes one parameter of type String. Usage: string.starts_with(\"tag\")"; pub const ERROR_STRING_START_WITH_REGEX: &str = "[starts_with_regex] takes one parameter of type String. Usage: string.start_with_regex(\"regex\")"; pub const ERROR_STRING_END_WITH: &str = "[ends_with] takes one parameter of type String. Usage: string.ends_with(\"tag\")"; pub const ERROR_STRING_END_WITH_REGEX: &str = "[ends_with_regex] takes one parameter of type String. Usage: string.ends_with_regex(\"regex\")"; pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed]"; pub const ERROR_STRING_SPLIT: &str = "[split] takes one parameter of type String. Usage: string.split(\"separator\")"; pub const ERROR_STRING_MATCH_REGEX: &str = "[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")"; pub const ERROR_STRING_POW: &str = "[pow] takes one parameter of type Float or Int. Usage: string.pow(number)"; pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true' "; pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true' to check it"; pub const ERROR_STRING_RHS: &str = "rhs must be of type string"; pub const ERROR_SLICE_ARG_INT: &str = ".slice(start, optional<end>) args need to be of type Integer"; pub const ERROR_SLICE_ARG_LEN: &str = ".slice(start, optional<end>) args need to be inferior to the string length"; pub const ERROR_SLICE_ARG2: &str = ".slice(start, optional<end>) end need to be superior to start in value ex: .slice(2, 5)"; pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String"; // #### Array pub const ERROR_ARRAY_TYPE: &str = "value must be of type array"; pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist"; pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int"; pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Udage: array[1]"; pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()"; pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is "; pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty"; pub const ERROR_ARRAY_INSERT_AT: &str = "[insert_at] takes two arguments. Usage: array.insert_at(1, elem)"; pub const ERROR_ARRAY_INSERT_AT_INT: &str = "[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)"; pub const ERROR_ARRAY_REMOVE_AT: &str = "[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) "; pub const ERROR_ARRAY_JOIN: &str = "[join] takes one parameter of type String. Usage: array.join(\"elem\") "; pub const ERROR_ARRAY_INDEX_OF: &str = "[index_of] takes one parameter. Usage: array.index_of(elem)"; pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. 
Usage: array.find(elem)"; pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array"; // #### CRYPTO OBJECT // ## HMAC and HASH OBJECT pub const ERROR_HASH: &str = "Crypto(string) command expect argument of type String"; pub const ERROR_HASH_ALGO: &str = "Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512"; pub const ERROR_HMAC_KEY: &str = "HMAC key need to be of type string"; pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is use incorrectly"; pub const ERROR_DIGEST_ALGO: &str = "Invalid Digest Algorithm, supported Algorithms are hex, base64"; // #### JWT OBJECT pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512"; pub const ERROR_JWT_SECRET: &str = "secret must be of type String"; pub const ERROR_JWT_SIGN_CLAIMS: &str = "JWT(claims) command expect argument 'claims' of type Object"; pub const ERROR_JWT_SIGN_ALGO: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expect first argument 'algo' of type String"; pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expect second argument 'claims' of type String"; pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expect argument 'jwt' of type String"; pub const ERROR_JWT_DECODE_ALGO: &str = "JWT(jwt).decode(algo, secret) expect first argument 'algo' of type String"; pub const ERROR_JWT_DECODE_SECRET: &str =
random_line_split
error_format.rs
string.ends_with_regex(\"regex\")"; pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed]"; pub const ERROR_STRING_SPLIT: &str = "[split] takes one parameter of type String. Usage: string.split(\"separator\")"; pub const ERROR_STRING_MATCH_REGEX: &str = "[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")"; pub const ERROR_STRING_POW: &str = "[pow] takes one parameter of type Float or Int. Usage: string.pow(number)"; pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true' "; pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true' to check it"; pub const ERROR_STRING_RHS: &str = "rhs must be of type string"; pub const ERROR_SLICE_ARG_INT: &str = ".slice(start, optional<end>) args need to be of type Integer"; pub const ERROR_SLICE_ARG_LEN: &str = ".slice(start, optional<end>) args need to be inferior to the string length"; pub const ERROR_SLICE_ARG2: &str = ".slice(start, optional<end>) end need to be superior to start in value ex: .slice(2, 5)"; pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String"; // #### Array pub const ERROR_ARRAY_TYPE: &str = "value must be of type array"; pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist"; pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int"; pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Udage: array[1]"; pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()"; pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is "; pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty"; pub const ERROR_ARRAY_INSERT_AT: &str = "[insert_at] takes two arguments. Usage: array.insert_at(1, elem)"; pub const ERROR_ARRAY_INSERT_AT_INT: &str = "[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)"; pub const ERROR_ARRAY_REMOVE_AT: &str = "[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) "; pub const ERROR_ARRAY_JOIN: &str = "[join] takes one parameter of type String. Usage: array.join(\"elem\") "; pub const ERROR_ARRAY_INDEX_OF: &str = "[index_of] takes one parameter. Usage: array.index_of(elem)"; pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. 
Usage: array.find(elem)"; pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array"; // #### CRYPTO OBJECT // ## HMAC and HASH OBJECT pub const ERROR_HASH: &str = "Crypto(string) command expect argument of type String"; pub const ERROR_HASH_ALGO: &str = "Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512"; pub const ERROR_HMAC_KEY: &str = "HMAC key need to be of type string"; pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is use incorrectly"; pub const ERROR_DIGEST_ALGO: &str = "Invalid Digest Algorithm, supported Algorithms are hex, base64"; // #### JWT OBJECT pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512"; pub const ERROR_JWT_SECRET: &str = "secret must be of type String"; pub const ERROR_JWT_SIGN_CLAIMS: &str = "JWT(claims) command expect argument 'claims' of type Object"; pub const ERROR_JWT_SIGN_ALGO: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expect first argument 'algo' of type String"; pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expect second argument 'claims' of type String"; pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expect argument 'jwt' of type String"; pub const ERROR_JWT_DECODE_ALGO: &str = "JWT(jwt).decode(algo, secret) expect first argument 'algo' of type String"; pub const ERROR_JWT_DECODE_SECRET: &str = "JWT(jwt).decode(algo, secret) expect second argument 'claims' of type String"; pub const ERROR_JWT_VALIDATION_CLAIMS: &str = "JWT(jwt).verify(claims, algo, secret) expect first argument 'claims' of type Object"; pub const ERROR_JWT_VALIDATION_ALGO: &str = "JWT(jwt).verify(claims, algo, secret) expect second argument 'algo' of type String"; pub const ERROR_JWT_VALIDATION_SECRETE: &str = "JWT(jwt).verify(claims, algo, secret) expect third argument 'secrete' of type String"; // #### HTTP OBJECT pub const ERROR_HTTP_SET: &str = "[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )"; pub const ERROR_HTTP_QUERY: &str = "[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )"; pub const ERROR_HTTP_SEND: &str = "[send] HTTP Object is bad formatted read doc for correct usage"; pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP"; // #### OBJECT pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object"; pub const ERROR_OBJECT_GET: &str = "key does not exist"; pub const ERROR_OBJECT_CONTAINS: &str = "[contains] takes one argument of type String. Usage: object.contains(\"key\")"; pub const ERROR_OBJECT_GET_GENERICS: &str = "[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")"; pub const ERROR_OBJECT_INSERT: &str = "[insert] take tow arguments. Usage: object.insert(string, any_type)"; pub const ERROR_OBJECT_ASSIGN: &str = "[assign] take one argument. Usage: object.assign({\"key\": \"value\"})"; pub const ERROR_OBJECT_REMOVE: &str = "[remove] takes one argument of type String. Usage: object.remove(\"key\")"; pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String"; pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object"; // #### METHODS pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named"; pub const ERROR_OPS: &str = "[!] Ops: Illegal operation"; pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero"; pub const ERROR_OPS_DIV_FLOAT: &str = "[!] 
Float: Division by zero"; pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:"; pub const OVERFLOWING_OPERATION: &str = "overflowing operation:"; //////////////////////////////////////////////////////////////////////////////// // PRiVTE FUNCTION //////////////////////////////////////////////////////////////////////////////// fn add_context_to_error_message<'a>( flow_slice: Span<'a>, message: String, line_number: u32, column: usize, offset: usize, ) -> String { use std::fmt::Write; let mut result = String::new(); let prefix = &flow_slice.fragment().as_bytes()[..offset]; // Find the line that includes the subslice: // Find the *last* newline before the substring starts let line_begin = prefix .iter() .rev() .position(|&b| b == b'\n') .map(|pos| offset - pos) .unwrap_or(0); // Find the full line after that newline let line = flow_slice.fragment()[line_begin..] .lines() .next() .unwrap_or(&flow_slice.fragment()[line_begin..]) .trim_end(); write!( &mut result, "at line {line_number},\n\ {line}\n\ {caret:>column$}\n\ {context}\n\n", line_number = line_number, context = message, line = line, caret = '^', column = column, ) // Because `write!` to a `String` is infallible, this `unwrap` is fine. .unwrap(); result } //////////////////////////////////////////////////////////////////////////////// // PUBLIC FUNCTION //////////////////////////////////////////////////////////////////////////////// pub fn gen_error_info(position: Position, message: String) -> ErrorInfo { ErrorInfo::new(position, message) } pub fn gen_warning_info(position: Position, message: String) -> Warnings { Warnings { position, message } } pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E> where E: ParseError<Span<'a>> + ContextError<Span<'a>>, { Err::Error(E::add_context( span, error, E::from_error_kind(span, ErrorKind::Tag), )) } pub fn
gen_nom_failure
identifier_name
types.go
2(len(result))) b.write([]byte(result)) case *ast.DUuid: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := b.putbuf[4 : 4+36] v.UUID.StringBytes(s) b.putInt32(int32(len(s))) b.write(s) case *ast.DIPAddr: b.writeLengthPrefixedString(v.IPAddr.String()) case *ast.DString: b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t)) case *ast.DCollatedString: b.writeLengthPrefixedString(v.Contents) case *ast.DDate: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DTime: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTime(timeofday.TimeOfDay(*v), b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DTimeTZ: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTimeTZ(v.TimeTZ, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DGeography: s := v.Geography.EWKBHex() b.putInt32(int32(len(s))) b.write([]byte(s)) case *ast.DGeometry: s := v.Geometry.EWKBHex() b.putInt32(int32(len(s))) b.write([]byte(s)) case *ast.DTimestamp: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTs(v.Time, nil, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DTimestampTZ: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTs(v.Time, conv.Location, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DInterval: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DJSON: b.writeLengthPrefixedString(v.JSON.String()) case *ast.DTuple: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DArray: // Arrays have custom formatting depending on their OID. b.textFormatter.FormatNode(d) b.writeFromFmtCtx(b.textFormatter) case *ast.DOid: b.writeLengthPrefixedDatum(v) case *ast.DEnum: // Enums are serialized with their logical representation. b.writeLengthPrefixedString(v.LogicalRep) default: b.setError(errors.Errorf("unsupported type %T", d)) } } // writeBinaryDatum writes d to the buffer. Type t must be specified for types // that have various width encodings (floats, ints, chars). It is ignored // (and can be nil) for types with a 1:1 datum:type mapping. func (b *writeBuffer) writeBinaryDatum( ctx context.Context, d ast.Datum, sessionLoc *time.Location, t *types.T, ) { if log.V(2) { log.Infof(ctx, "pgwire writing BINARY datum of type: %T, %#v", d, d) } if d == ast.DNull { // NULL is encoded as -1; all other values have a length prefix. b.putInt32(-1) return } switch v := ast.UnwrapDatum(nil, d).(type) { case *ast.DBitArray: words, lastBitsUsed := v.EncodingParts() if len(words) == 0 { b.putInt32(4) } else { // Encode the length of the output bytes. It is computed here so we don't // have to keep a buffer. // 4: the int32 of the bitLen. // 8*(len(words)-1): number of 8-byte words except the last one since it's // partial. // (lastBitsUsed+7)/8: number of bytes that will be written in the last // partial word. The /8 rounds down, such that the +7 will cause 1-or-more // bits to use a byte, but 0 will not. b.putInt32(4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8)) } bitLen := v.BitLen() b.putInt32(int32(bitLen)) var byteBuf [8]byte for i := 0; i < len(words)-1; i++ { w := words[i] binary.BigEndian.PutUint64(byteBuf[:], w) b.write(byteBuf[:]) } if len(words) > 0 { w := words[len(words)-1] for i := uint(0); i < uint(lastBitsUsed); i += 8 { c := byte(w >> (56 - i)) b.writeByte(c) } } case *ast.DBool: b.putInt32(1) if *v
else { b.writeByte(0) } case *ast.DInt: switch t.Oid() { case oid.T_int2: b.putInt32(2) b.putInt16(int16(*v)) case oid.T_int4: b.putInt32(4) b.putInt32(int32(*v)) case oid.T_int8: b.putInt32(8) b.putInt64(int64(*v)) default: b.setError(errors.Errorf("unsupported int oid: %v", t.Oid())) } case *ast.DFloat: switch t.Oid() { case oid.T_float4: b.putInt32(4) b.putInt32(int32(math.Float32bits(float32(*v)))) case oid.T_float8: b.putInt32(8) b.putInt64(int64(math.Float64bits(float64(*v)))) default: b.setError(errors.Errorf("unsupported float oid: %v", t.Oid())) } case *ast.DDecimal: if v.Form != apd.Finite { b.putInt32(8) // 0 digits. b.putInt32(0) // https://github.com/postgres/postgres/blob/ffa4cbd623dd69f9fa99e5e92426928a5782cf1a/src/backend/utils/adt/numeric.c#L169 b.write([]byte{0xc0, 0, 0, 0}) if v.Form == apd.Infinite { // TODO(mjibson): #32489 // The above encoding is not correct for Infinity, but since that encoding // doesn't exist in postgres, it's unclear what to do. For now use the NaN // encoding and count it to see if anyone even needs this. telemetry.Inc(sqltelemetry.BinaryDecimalInfinityCounter) } return } alloc := struct { pgNum pgwirebase.PGNumeric bigI big.Int }{ pgNum: pgwirebase.PGNumeric{ // Since we use 2000 as the exponent limits in ast.DecimalCtx, this // conversion should not overflow. Dscale: int16(-v.Exponent), }, } if v.Sign() >= 0 { alloc.pgNum.Sign = pgwirebase.PGNumericPos } else { alloc.pgNum.Sign = pgwirebase.PGNumericNeg } isZero := func(r rune) bool { return r == '0' } // Mostly cribbed from libpqtypes' str2num. digits := strings.TrimLeftFunc(alloc.bigI.Abs(&v.Coeff).String(), isZero) dweight := len(digits) - int(alloc.pgNum.Dscale) - 1 digits = strings.TrimRightFunc(digits, isZero) if dweight >= 0 { alloc.pgNum.Weight = int16((dweight+1+pgwirebase.PGDecDigits-1)/pgwirebase.PGDecDigits - 1) } else { alloc.pgNum.Weight = int16(-((-dweight-1)/pgwirebase.PGDecDigits + 1)) } offset := (int(alloc.pgNum.Weight)+1)*pgwirebase.PGDecDigits - (dweight + 1) alloc.pgNum.Ndigits = int16((len(digits) + offset + pgwirebase.PGDecDigits - 1) / pgwirebase.PGDecDigits) if len(digits) == 0 {
{ b.writeByte(1) }
conditional_block
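The *ast.DBitArray branch in the Go cell above documents how the binary payload length is computed: a 4-byte prefix carrying the bit length, 8 bytes for every full 64-bit word except the last, and (lastBitsUsed+7)/8 bytes for the partially used final word. A self-contained sketch of just that arithmetic (bitArrayWireSize and the sample inputs are illustrative, not names from the file):

package main

import "fmt"

// bitArrayWireSize mirrors the length computation documented in the
// *ast.DBitArray branch of writeBinaryDatum: a 4-byte int32 carrying the
// bit length, 8 bytes for every full 64-bit word except the last, and a
// rounded-up byte count for the bits used in the final word. An empty
// bit array is just the 4-byte prefix.
func bitArrayWireSize(words []uint64, lastBitsUsed uint) int32 {
	if len(words) == 0 {
		return 4
	}
	return 4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8)
}

func main() {
	fmt.Println(bitArrayWireSize(nil, 0))                // 4: no words at all
	fmt.Println(bitArrayWireSize(make([]uint64, 1), 1))  // 5: one word with a single bit used
	fmt.Println(bitArrayWireSize(make([]uint64, 2), 60)) // 20: one full word + 8 tail bytes
}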
types.go
s := strconv.AppendInt(b.putbuf[4:4], int64(*v), 10) b.putInt32(int32(len(s))) b.write(s) case *ast.DFloat: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := strconv.AppendFloat(b.putbuf[4:4], float64(*v), 'g', conv.GetFloatPrec(), 64) b.putInt32(int32(len(s))) b.write(s) case *ast.DDecimal: b.writeLengthPrefixedDatum(v) case *ast.DBytes: result := lex.EncodeByteArrayToRawBytes( string(*v), conv.BytesEncodeFormat, false /* skipHexPrefix */) b.putInt32(int32(len(result))) b.write([]byte(result)) case *ast.DUuid: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := b.putbuf[4 : 4+36] v.UUID.StringBytes(s) b.putInt32(int32(len(s))) b.write(s) case *ast.DIPAddr: b.writeLengthPrefixedString(v.IPAddr.String()) case *ast.DString: b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t)) case *ast.DCollatedString: b.writeLengthPrefixedString(v.Contents) case *ast.DDate: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DTime: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTime(timeofday.TimeOfDay(*v), b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DTimeTZ: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTimeTZ(v.TimeTZ, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DGeography: s := v.Geography.EWKBHex() b.putInt32(int32(len(s))) b.write([]byte(s)) case *ast.DGeometry: s := v.Geometry.EWKBHex() b.putInt32(int32(len(s))) b.write([]byte(s)) case *ast.DTimestamp: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTs(v.Time, nil, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DTimestampTZ: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTs(v.Time, conv.Location, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DInterval: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DJSON: b.writeLengthPrefixedString(v.JSON.String()) case *ast.DTuple: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DArray: // Arrays have custom formatting depending on their OID. b.textFormatter.FormatNode(d) b.writeFromFmtCtx(b.textFormatter) case *ast.DOid: b.writeLengthPrefixedDatum(v) case *ast.DEnum: // Enums are serialized with their logical representation. b.writeLengthPrefixedString(v.LogicalRep) default: b.setError(errors.Errorf("unsupported type %T", d)) } } // writeBinaryDatum writes d to the buffer. Type t must be specified for types // that have various width encodings (floats, ints, chars). It is ignored // (and can be nil) for types with a 1:1 datum:type mapping. func (b *writeBuffer) writeBinaryDatum( ctx context.Context, d ast.Datum, sessionLoc *time.Location, t *types.T, ) { if log.V(2) { log.Infof(ctx, "pgwire writing BINARY datum of type: %T, %#v", d, d) } if d == ast.DNull { // NULL is encoded as -1; all other values have a length prefix. b.putInt32(-1) return } switch v := ast.UnwrapDatum(nil, d).(type) { case *ast.DBitArray: words, lastBitsUsed := v.EncodingParts() if len(words) == 0 { b.putInt32(4) } else { // Encode the length of the output bytes. It is computed here so we don't // have to keep a buffer. // 4: the int32 of the bitLen. // 8*(len(words)-1): number of 8-byte words except the last one since it's // partial. // (lastBitsUsed+7)/8: number of bytes that will be written in the last // partial word. The /8 rounds down, such that the +7 will cause 1-or-more // bits to use a byte, but 0 will not. 
b.putInt32(4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8)) } bitLen := v.BitLen() b.putInt32(int32(bitLen)) var byteBuf [8]byte for i := 0; i < len(words)-1; i++ { w := words[i] binary.BigEndian.PutUint64(byteBuf[:], w) b.write(byteBuf[:]) } if len(words) > 0 { w := words[len(words)-1] for i := uint(0); i < uint(lastBitsUsed); i += 8 { c := byte(w >> (56 - i)) b.writeByte(c) } } case *ast.DBool: b.putInt32(1) if *v { b.writeByte(1) } else { b.writeByte(0) } case *ast.DInt: switch t.Oid() { case oid.T_int2: b.putInt32(2) b.putInt16(int16(*v)) case oid.T_int4: b.putInt32(4) b.putInt32(int32(*v)) case oid.T_int8: b.putInt32(8) b.putInt64(int64(*v)) default: b.setError(errors.Errorf("unsupported int oid: %v", t.Oid())) } case *ast.DFloat: switch t.Oid() { case oid.T_float4: b.putInt32(4) b.putInt32(int32(math.Float32bits(float32(*v)))) case oid.T_float8: b.putInt32(8) b.putInt64(int64(math.Float64bits(float64(*v)))) default: b.setError(errors.Errorf("unsupported float oid: %v", t.Oid())) } case *ast.DDecimal: if v.Form != apd.Finite { b.putInt32(8) // 0 digits. b.putInt32(0) // https://github.com/postgres/postgres/blob/ffa4cbd623dd69f9fa99e5e92426928a5782cf1a/src/backend/utils/adt/numeric.c#L169 b.write([]byte{0xc0, 0, 0, 0}) if v.Form == apd.Infinite { // TODO(mjibson): #32489 // The above encoding is not correct for Infinity, but since that encoding // doesn't exist in postgres, it's unclear what to do. For now use the NaN // encoding and count it to see if anyone even needs this. telemetry.Inc(sqltelemetry.BinaryDecimalInfinityCounter) } return } alloc := struct { pgNum pgwirebase.PGNumeric bigI big.Int }{ pgNum: pgwirebase.PGNumeric{ // Since we
{ if log.V(2) { log.Infof(ctx, "pgwire writing TEXT datum of type: %T, %#v", d, d) } if d == ast.DNull { // NULL is encoded as -1; all other values have a length prefix. b.putInt32(-1) return } switch v := ast.UnwrapDatum(nil, d).(type) { case *ast.DBitArray: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DBool: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DInt: // Start at offset 4 because `putInt32` clobbers the first 4 bytes.
identifier_body
types.go
oid oid.Oid // Variable-size types have size=-1. // Note that the protocol has both int16 and int32 size fields, // so this attribute is an unsized int and should be cast // as needed. // This field does *not* correspond to the encoded length of a // data type, so it's unclear what, if anything, it is used for. // To get the right value, "SELECT oid, typlen FROM pg_type" // on a postgres server. size int } func pgTypeForParserType(t *types.T) pgType { size := -1 if s, variable := ast.DatumTypeSize(t); !variable { size = int(s) } return pgType{ oid: t.Oid(), size: size, } } // resolveBlankPaddedChar pads the given string with spaces if blank padding is // required or returns the string unmodified otherwise. func resolveBlankPaddedChar(s string, t *types.T) string { if t.Oid() == oid.T_bpchar { // Pad spaces on the right of the string to make it of length specified in // the type t. return fmt.Sprintf("%-*v", t.Width(), s) } return s } // writeTextDatum writes d to the buffer. Type t must be specified for types // that have various width encodings and therefore need padding (chars). // It is ignored (and can be nil) for types which do not need padding. func (b *writeBuffer) writeTextDatum( ctx context.Context, d ast.Datum, conv sessiondata.DataConversionConfig, t *types.T, ) { if log.V(2) { log.Infof(ctx, "pgwire writing TEXT datum of type: %T, %#v", d, d) } if d == ast.DNull { // NULL is encoded as -1; all other values have a length prefix. b.putInt32(-1) return } switch v := ast.UnwrapDatum(nil, d).(type) { case *ast.DBitArray: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DBool: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DInt: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := strconv.AppendInt(b.putbuf[4:4], int64(*v), 10) b.putInt32(int32(len(s))) b.write(s) case *ast.DFloat: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := strconv.AppendFloat(b.putbuf[4:4], float64(*v), 'g', conv.GetFloatPrec(), 64) b.putInt32(int32(len(s))) b.write(s) case *ast.DDecimal: b.writeLengthPrefixedDatum(v) case *ast.DBytes: result := lex.EncodeByteArrayToRawBytes( string(*v), conv.BytesEncodeFormat, false /* skipHexPrefix */) b.putInt32(int32(len(result))) b.write([]byte(result)) case *ast.DUuid: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := b.putbuf[4 : 4+36] v.UUID.StringBytes(s) b.putInt32(int32(len(s))) b.write(s) case *ast.DIPAddr: b.writeLengthPrefixedString(v.IPAddr.String()) case *ast.DString: b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t)) case *ast.DCollatedString: b.writeLengthPrefixedString(v.Contents) case *ast.DDate: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DTime: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTime(timeofday.TimeOfDay(*v), b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DTimeTZ: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTimeTZ(v.TimeTZ, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DGeography: s := v.Geography.EWKBHex() b.putInt32(int32(len(s))) b.write([]byte(s)) case *ast.DGeometry: s := v.Geometry.EWKBHex() b.putInt32(int32(len(s))) b.write([]byte(s)) case *ast.DTimestamp: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. 
s := formatTs(v.Time, nil, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DTimestampTZ: // Start at offset 4 because `putInt32` clobbers the first 4 bytes. s := formatTs(v.Time, conv.Location, b.putbuf[4:4]) b.putInt32(int32(len(s))) b.write(s) case *ast.DInterval: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DJSON: b.writeLengthPrefixedString(v.JSON.String()) case *ast.DTuple: b.textFormatter.FormatNode(v) b.writeFromFmtCtx(b.textFormatter) case *ast.DArray: // Arrays have custom formatting depending on their OID. b.textFormatter.FormatNode(d) b.writeFromFmtCtx(b.textFormatter) case *ast.DOid: b.writeLengthPrefixedDatum(v) case *ast.DEnum: // Enums are serialized with their logical representation. b.writeLengthPrefixedString(v.LogicalRep) default: b.setError(errors.Errorf("unsupported type %T", d)) } } // writeBinaryDatum writes d to the buffer. Type t must be specified for types // that have various width encodings (floats, ints, chars). It is ignored // (and can be nil) for types with a 1:1 datum:type mapping. func (b *writeBuffer) writeBinaryDatum( ctx context.Context, d ast.Datum, sessionLoc *time.Location, t *types.T, ) { if log.V(2) { log.Infof(ctx, "pgwire writing BINARY datum of type: %T, %#v", d, d) } if d == ast.DNull { // NULL is encoded as -1; all other values have a length prefix. b.putInt32(-1) return } switch v := ast.UnwrapDatum(nil, d).(type) { case *ast.DBitArray: words, lastBitsUsed := v.EncodingParts() if len(words) == 0 { b.putInt32(4) } else { // Encode the length of the output bytes. It is computed here so we don't // have to keep a buffer. // 4: the int32 of the bitLen. // 8*(len(words)-1): number of 8-byte words except the last one since it's // partial. // (lastBitsUsed+7)/8: number of bytes that will be written in the last // partial word. The /8 rounds down, such that the +7 will cause 1-or-more // bits to use a byte, but 0 will not. b.putInt32(4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8)) } bitLen := v.BitLen() b.putInt32(int32(bitLen)) var byteBuf [8]byte for i := 0; i < len(words)-1; i++ { w := words[i] binary.BigEndian.PutUint64(byteBuf[:], w) b.write(byteBuf[:]) } if len(words) > 0 { w := words[len(words)-1] for i := uint(0); i < uint(lastBitsUsed); i += 8 { c := byte(w >> (56 - i)) b.writeByte(c) } } case *ast.DBool: b.putInt32(1) if *v { b.writeByte(1) } else { b.writeByte(0) } case *ast.DInt: switch t.Oid() { case oid.T_int2: b.putInt32(2) b.putInt16(int16(*v)) case oid.T_int4: b.putInt32(4) b.putInt32(int32(*v)) case oid.T_int8: b.putInt32(8) b.putInt64(int64(*v)) default: b.setError(errors.Errorf("unsupported int
) // pgType contains type metadata used in RowDescription messages. type pgType struct {
random_line_split
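resolveBlankPaddedChar in the cell above pads bpchar values with trailing spaces out to the declared width via fmt.Sprintf("%-*v", t.Width(), s). A minimal standalone illustration of that padding behaviour (blankPad and the width of 6 are examples, not part of the original file):

package main

import "fmt"

// blankPad mimics resolveBlankPaddedChar: left-align the value and pad
// it with spaces on the right up to the requested width, which is what
// the bpchar (blank-padded CHAR(n)) text encoding expects.
func blankPad(s string, width int) string {
	return fmt.Sprintf("%-*v", width, s)
}

func main() {
	fmt.Printf("%q\n", blankPad("ab", 6))     // "ab    " – padded out to six characters
	fmt.Printf("%q\n", blankPad("abcdef", 6)) // "abcdef" – already at width, left unchanged
}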
types.go
igits := strings.TrimLeftFunc(alloc.bigI.Abs(&v.Coeff).String(), isZero) dweight := len(digits) - int(alloc.pgNum.Dscale) - 1 digits = strings.TrimRightFunc(digits, isZero) if dweight >= 0 { alloc.pgNum.Weight = int16((dweight+1+pgwirebase.PGDecDigits-1)/pgwirebase.PGDecDigits - 1) } else { alloc.pgNum.Weight = int16(-((-dweight-1)/pgwirebase.PGDecDigits + 1)) } offset := (int(alloc.pgNum.Weight)+1)*pgwirebase.PGDecDigits - (dweight + 1) alloc.pgNum.Ndigits = int16((len(digits) + offset + pgwirebase.PGDecDigits - 1) / pgwirebase.PGDecDigits) if len(digits) == 0 { offset = 0 alloc.pgNum.Ndigits = 0 alloc.pgNum.Weight = 0 } digitIdx := -offset nextDigit := func() int16 { var ndigit int16 for nextDigitIdx := digitIdx + pgwirebase.PGDecDigits; digitIdx < nextDigitIdx; digitIdx++ { ndigit *= 10 if digitIdx >= 0 && digitIdx < len(digits) { ndigit += int16(digits[digitIdx] - '0') } } return ndigit } b.putInt32(int32(2 * (4 + alloc.pgNum.Ndigits))) b.putInt16(alloc.pgNum.Ndigits) b.putInt16(alloc.pgNum.Weight) b.putInt16(int16(alloc.pgNum.Sign)) b.putInt16(alloc.pgNum.Dscale) for digitIdx < len(digits) { b.putInt16(nextDigit()) } case *ast.DBytes: b.putInt32(int32(len(*v))) b.write([]byte(*v)) case *ast.DUuid: b.putInt32(16) b.write(v.GetBytes()) case *ast.DIPAddr: // We calculate the Postgres binary format for an IPAddr. For the spec see, // https://github.com/postgres/postgres/blob/81c5e46c490e2426db243eada186995da5bb0ba7/src/backend/utils/adt/network.c#L144 // The pgBinary encoding is as follows: // The int32 length of the following bytes. // The family byte. // The mask size byte. // A 0 byte for is_cidr. It's ignored on the postgres frontend. // The length of our IP bytes. // The IP bytes. const pgIPAddrBinaryHeaderSize = 4 if v.Family == ipaddr.IPv4family { b.putInt32(net.IPv4len + pgIPAddrBinaryHeaderSize) b.writeByte(pgwirebase.PGBinaryIPv4family) b.writeByte(v.Mask) b.writeByte(0) b.writeByte(byte(net.IPv4len)) err := v.Addr.WriteIPv4Bytes(b) if err != nil { b.setError(err) } } else if v.Family == ipaddr.IPv6family { b.putInt32(net.IPv6len + pgIPAddrBinaryHeaderSize) b.writeByte(pgwirebase.PGBinaryIPv6family) b.writeByte(v.Mask) b.writeByte(0) b.writeByte(byte(net.IPv6len)) err := v.Addr.WriteIPv6Bytes(b) if err != nil { b.setError(err) } } else { b.setError(errors.Errorf("error encoding inet to pgBinary: %v", v.IPAddr)) } case *ast.DEnum: b.writeLengthPrefixedString(v.LogicalRep) case *ast.DString: b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t)) case *ast.DCollatedString: b.writeLengthPrefixedString(v.Contents) case *ast.DTimestamp: b.putInt32(8) b.putInt64(timeToPgBinary(v.Time, nil)) case *ast.DTimestampTZ: b.putInt32(8) b.putInt64(timeToPgBinary(v.Time, sessionLoc)) case *ast.DDate: b.putInt32(4) b.putInt32(v.PGEpochDays()) case *ast.DTime: b.putInt32(8) b.putInt64(int64(*v)) case *ast.DTimeTZ: b.putInt32(12) b.putInt64(int64(v.TimeOfDay)) b.putInt32(v.OffsetSecs) case *ast.DInterval: b.putInt32(16) b.putInt64(v.Nanos() / int64(time.Microsecond/time.Nanosecond)) b.putInt32(int32(v.Days)) b.putInt32(int32(v.Months)) case *ast.DTuple: // TODO(andrei): We shouldn't be allocating a new buffer for every array. subWriter := newWriteBuffer(nil /* bytecount */) // Put the number of datums. 
subWriter.putInt32(int32(len(v.D))) for _, elem := range v.D { oid := elem.ResolvedType().Oid() subWriter.putInt32(int32(oid)) subWriter.writeBinaryDatum(ctx, elem, sessionLoc, elem.ResolvedType()) } b.writeLengthPrefixedBuffer(&subWriter.wrapped) case *ast.DGeography: b.putInt32(int32(len(v.EWKB()))) b.write(v.EWKB()) case *ast.DGeometry: b.putInt32(int32(len(v.EWKB()))) b.write(v.EWKB()) case *ast.DArray: if v.ParamTyp.Family() == types.ArrayFamily { b.setError(unimplemented.NewWithIssueDetail(32552, "binenc", "unsupported binary serialization of multidimensional arrays")) return } // TODO(andrei): We shouldn't be allocating a new buffer for every array. subWriter := newWriteBuffer(nil /* bytecount */) // Put the number of dimensions. We currently support 1d arrays only. var ndims int32 = 1 if v.Len() == 0 { ndims = 0 } subWriter.putInt32(ndims) hasNulls := 0 if v.HasNulls { hasNulls = 1 } oid := v.ParamTyp.Oid() subWriter.putInt32(int32(hasNulls)) subWriter.putInt32(int32(oid)) if v.Len() > 0 { subWriter.putInt32(int32(v.Len())) // Lower bound, we only support a lower bound of 1. subWriter.putInt32(1) for _, elem := range v.Array { subWriter.writeBinaryDatum(ctx, elem, sessionLoc, v.ParamTyp) } } b.writeLengthPrefixedBuffer(&subWriter.wrapped) case *ast.DJSON: s := v.JSON.String() b.putInt32(int32(len(s) + 1)) // Postgres version number, as of writing, `1` is the only valid value. b.writeByte(1) b.writeString(s) case *ast.DOid: b.putInt32(4) b.putInt32(int32(v.DInt)) default: b.setError(errors.AssertionFailedf("unsupported type %T", d)) } } const ( pgTimeFormat = "15:04:05.999999" pgTimeTZFormat = pgTimeFormat + "-07:00" pgDateFormat = "2006-01-02" pgTimeStampFormatNoOffset = pgDateFormat + " " + pgTimeFormat pgTimeStampFormat = pgTimeStampFormatNoOffset + "-07:00" pgTime2400Format = "24:00:00" ) // formatTime formats t into a format lib/pq understands, appending to the // provided tmp buffer and reallocating if needed. The function will then return // the resulting buffer. func formatTime(t timeofday.TimeOfDay, tmp []byte) []byte { // time.Time's AppendFormat does not recognize 2400, so special case it accordingly. if t == timeofday.Time2400 { return []byte(pgTime2400Format) } return t.ToTime().AppendFormat(tmp, pgTimeFormat) } // formatTimeTZ formats t into a format lib/pq understands, appending to the // provided tmp buffer and reallocating if needed. The function will then return // the resulting buffer. // Note it does not understand the "second" component of the offset as lib/pq // cannot parse it. func
formatTimeTZ
identifier_name
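The `DIPAddr` case in the row above spells out the pgwire binary layout for inet values: an int32 length prefix, a family byte, a mask-size byte, an `is_cidr` byte, an address-length byte, and finally the address bytes. As a rough, self-contained illustration of that layout (a sketch only, not CockroachDB's writeBuffer API; the family-byte value of 2 and the helper name `encodeInetIPv4` are assumptions, not taken from the code above), the following Go snippet assembles the same byte sequence for an IPv4 address:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"net"
)

// encodeInetIPv4 builds the wire bytes for a pgwire binary "inet" value holding
// an IPv4 address: int32 length, family byte, mask-size byte, is_cidr byte,
// address-length byte, then the address bytes themselves.
func encodeInetIPv4(ip net.IP, maskBits byte) []byte {
	// Assumed value of the IPv4 family byte (PostgreSQL's PGSQL_AF_INET).
	const pgBinaryIPv4family = 2

	addr := ip.To4()

	var payload bytes.Buffer
	payload.WriteByte(pgBinaryIPv4family) // family
	payload.WriteByte(maskBits)           // mask size
	payload.WriteByte(0)                  // is_cidr, ignored by the frontend
	payload.WriteByte(byte(len(addr)))    // length of the address bytes
	payload.Write(addr)                   // the IP bytes

	var out bytes.Buffer
	// The length prefix covers the 4 header bytes plus the 4 address bytes.
	binary.Write(&out, binary.BigEndian, int32(payload.Len()))
	out.Write(payload.Bytes())
	return out.Bytes()
}

func main() {
	fmt.Printf("% x\n", encodeInetIPv4(net.ParseIP("192.168.0.1"), 32))
}

For 192.168.0.1/32 this should print 00 00 00 08 02 20 00 04 c0 a8 00 01, i.e. an 8-byte payload (net.IPv4len plus the four header bytes), which is the same length the writeBinaryDatum code computes.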
lmdb_backend.rs
x| x.1), )) } fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> { let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec()); self.db()?.get_ser(&key).map_err(|e| e.into()) } fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> { Ok(Box::new( self.db()? .iter(&[TX_LOG_ENTRY_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> { let ctx_key = to_key_u64( PRIVATE_TX_CONTEXT_PREFIX, &mut slate_id.to_vec(), participant_id as u64, ); let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?; let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || { format!("Slate id: {:x?}", slate_id.to_vec()) })?; for i in 0..SECRET_KEY_SIZE { ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i]; ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i]; } Ok(ctx) } fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> { Ok(Box::new( self.db()? .iter(&[ACCOUNT_PATH_MAPPING_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> { let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec()); let ser = self.db()?.get_ser(&acct_key)?; Ok(ser) } fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> { let filename = format!("{}.grintx", uuid); let path = Path::new(&self.config.data_file_dir) .join(TX_SAVE_DIR) .join(filename); if !path.exists() { return Ok(None); } let tx_file = Path::new(&path).to_path_buf(); let mut tx_f = File::open(tx_file)?; let mut content = String::new(); tx_f.read_to_string(&mut content)?; let tx_bin = from_hex(content).unwrap(); Ok(Some( ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(), )) } fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> { let filename = format!("{}.proof", uuid); let path = Path::new(&self.config.data_file_dir) .join(TX_PROOF_SAVE_DIR) .join(filename); let tx_proof_file = Path::new(&path).to_path_buf(); Ok(tx_proof_file.exists()) } fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> { let filename = format!("{}.proof", uuid); let path = Path::new(&self.config.data_file_dir) .join(TX_PROOF_SAVE_DIR) .join(filename); let tx_proof_file = Path::new(&path).to_path_buf(); if !tx_proof_file.exists() { return Ok(None); } let mut tx_proof_f = File::open(tx_proof_file)?; let mut content = String::new(); tx_proof_f.read_to_string(&mut content)?; Ok(Some(serde_json::from_str(&content)?)) } fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> { Ok(Box::new(Batch { _store: self, db: RefCell::new(Some(self.db()?.batch()?)), keychain: self.keychain.clone(), })) } fn next_child<'a>(&mut self) -> Result<Identifier> { let mut deriv_idx = { let batch = self.db()?.batch()?; let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec()); match batch.get_ser(&deriv_key)? 
{ Some(idx) => idx, None => 0, } }; let mut return_path = self.parent_key_id.to_path(); return_path.depth = return_path.depth + 1; return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx); deriv_idx = deriv_idx + 1; let mut batch = self.batch()?; batch.save_child_index(&self.parent_key_id, deriv_idx)?; batch.commit()?; Ok(Identifier::from_path(&return_path)) } fn get_last_confirmed_height<'a>(&self) -> Result<u64> { let batch = self.db()?.batch()?; let height_key = to_key( CONFIRMED_HEIGHT_PREFIX, &mut self.parent_key_id.to_bytes().to_vec(), ); let last_confirmed_height = match batch.get_ser(&height_key)? { Some(h) => h, None => 0, }; Ok(last_confirmed_height) } fn restore(&mut self) -> Result<()> { restore::restore(self).context(ErrorKind::Restore)?; Ok(()) } fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> { restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?; Ok(()) } fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> { if self.config.no_commit_cache == Some(true) { Ok(None) } else { Ok(Some(grin_util::to_hex( self.keychain() .commit(amount, id, &SwitchCommitmentType::Regular)? .0 .to_vec(), ))) } } } /// An atomic batch in which all changes can be committed all at once or /// discarded on error. pub struct Batch<'a, C, K> where C: NodeClient, K: Keychain, { _store: &'a Backend<C, K>, db: RefCell<Option<grin_store::Batch<'a>>>, /// Keychain keychain: Option<K>, } #[allow(missing_docs)] impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K> where C: NodeClient, K: Keychain, { fn keychain(&mut self) -> &mut K { self.keychain.as_mut().unwrap() } fn save_output(&mut self, out: &OutputData) -> Result<()> { // Save the output data to the db. { let key = match out.mmr_index { Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i), None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()), }; self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?; } Ok(()) } fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> { // Delete the output data. { let key = match mmr_index { Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i), None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()), }; let _ = self.db.borrow().as_ref().unwrap().delete(&key); } Ok(()) } fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> { let filename = format!("{}.grintx", uuid); let path = Path::new(&self._store.config.data_file_dir) .join(TX_SAVE_DIR) .join(filename); let path_buf = Path::new(&path).to_path_buf(); let mut stored_tx = File::create(path_buf)?; let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap()); stored_tx.write_all(&tx_hex.as_bytes())?; stored_tx.sync_all()?; Ok(()) } fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> { let filename = format!("{}.proof", uuid); let path = Path::new(&self._store.config.data_file_dir) .join(TX_PROOF_SAVE_DIR) .join(filename); let path_buf = Path::new(&path).to_path_buf(); let mut stored_tx = File::create(path_buf)?; let proof_ser = serde_json::to_string(tx_proof)?; stored_tx.write_all(&proof_ser.as_bytes())?; stored_tx.sync_all()?; Ok(()) } fn
next_tx_log_id
identifier_name
lmdb_backend.rs
.deref())?; self.password = Some(password); Ok(()) } /// Clear out backend fn clear(&mut self) -> Result<()> { self.disconnect()?; let root_path = Path::new(&self.config.data_file_dir); if !root_path.exists() { return Ok(()); } let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string(); let backup_path = root_path.join("backups").join(backup_dir); fs::create_dir_all(&backup_path)?; let db_path = root_path.join(DB_DIR); if db_path.exists() { fs::rename(&db_path, &backup_path.join(DB_DIR))?; } let txs_path = root_path.join(TX_SAVE_DIR); if txs_path.exists() { fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?; } let proofs_path = root_path.join(TX_PROOF_SAVE_DIR); if proofs_path.exists() { fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?; } self.connect()?; Ok(()) } /// Initialise with whatever stored credentials we have fn open_with_credentials(&mut self) -> Result<()> { let wallet_seed = WalletSeed::from_file( &self.config, &self.password.clone().ok_or(ErrorKind::OpenWalletError)?, ) .map_err(|_| ErrorKind::OpenWalletError)?; self.keychain = Some( wallet_seed .derive_keychain(global::is_floonet()) .map_err(|_| ErrorKind::DeriveKeychainError)?, ); Ok(()) } /// Close wallet and remove any stored credentials (TBD) fn close(&mut self) -> Result<()> { self.keychain = None; Ok(()) } /// Return the keychain being used fn keychain(&mut self) -> &mut K { self.keychain.as_mut().unwrap() } /// Return the node client being used fn w2n_client(&mut self) -> &mut C { &mut self.w2n_client } /// Set parent path by account name fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> { let label = label.to_owned(); let res = self.accounts()?.find(|l| l.label == label); if let Some(a) = res { self.set_parent_key_id(&a.path); Ok(()) } else { return Err(ErrorKind::UnknownAccountLabel(label.clone()).into()); } } /// set parent path fn set_parent_key_id(&mut self, id: &Identifier) { self.parent_key_id = id.clone(); } fn get_parent_key_id(&self) -> Identifier { self.parent_key_id.clone() } fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> { let key = match mmr_index { Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i), None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()), }; option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id)) .map_err(|e| e.into()) } fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> { Ok(Box::new( self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1), )) } fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> { let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec()); self.db()?.get_ser(&key).map_err(|e| e.into()) } fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> { Ok(Box::new( self.db()? 
.iter(&[TX_LOG_ENTRY_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> { let ctx_key = to_key_u64( PRIVATE_TX_CONTEXT_PREFIX, &mut slate_id.to_vec(), participant_id as u64, ); let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?; let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || { format!("Slate id: {:x?}", slate_id.to_vec()) })?; for i in 0..SECRET_KEY_SIZE { ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i]; ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i]; } Ok(ctx) } fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> { Ok(Box::new( self.db()? .iter(&[ACCOUNT_PATH_MAPPING_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> { let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec()); let ser = self.db()?.get_ser(&acct_key)?; Ok(ser) } fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> { let filename = format!("{}.grintx", uuid); let path = Path::new(&self.config.data_file_dir) .join(TX_SAVE_DIR) .join(filename); if !path.exists() { return Ok(None); } let tx_file = Path::new(&path).to_path_buf(); let mut tx_f = File::open(tx_file)?; let mut content = String::new(); tx_f.read_to_string(&mut content)?; let tx_bin = from_hex(content).unwrap(); Ok(Some( ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(), )) } fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> { let filename = format!("{}.proof", uuid); let path = Path::new(&self.config.data_file_dir) .join(TX_PROOF_SAVE_DIR) .join(filename); let tx_proof_file = Path::new(&path).to_path_buf(); Ok(tx_proof_file.exists()) } fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> { let filename = format!("{}.proof", uuid); let path = Path::new(&self.config.data_file_dir) .join(TX_PROOF_SAVE_DIR) .join(filename); let tx_proof_file = Path::new(&path).to_path_buf(); if !tx_proof_file.exists() { return Ok(None); } let mut tx_proof_f = File::open(tx_proof_file)?; let mut content = String::new(); tx_proof_f.read_to_string(&mut content)?; Ok(Some(serde_json::from_str(&content)?)) } fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> { Ok(Box::new(Batch { _store: self, db: RefCell::new(Some(self.db()?.batch()?)), keychain: self.keychain.clone(), })) } fn next_child<'a>(&mut self) -> Result<Identifier> { let mut deriv_idx = { let batch = self.db()?.batch()?; let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec()); match batch.get_ser(&deriv_key)? { Some(idx) => idx, None => 0, } }; let mut return_path = self.parent_key_id.to_path(); return_path.depth = return_path.depth + 1; return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx); deriv_idx = deriv_idx + 1; let mut batch = self.batch()?; batch.save_child_index(&self.parent_key_id, deriv_idx)?; batch.commit()?; Ok(Identifier::from_path(&return_path)) } fn get_last_confirmed_height<'a>(&self) -> Result<u64> { let batch = self.db()?.batch()?; let height_key = to_key( CONFIRMED_HEIGHT_PREFIX, &mut self.parent_key_id.to_bytes().to_vec(), ); let last_confirmed_height = match batch.get_ser(&height_key)? { Some(h) => h, None => 0, }; Ok(last_confirmed_height) } fn restore(&mut self) -> Result<()> { restore::restore(self).context(ErrorKind::Restore)?;
Ok(()) }
random_line_split
lmdb_backend.rs
None, password: Some(ZeroingString::from(password)), keychain: None, parent_key_id: K::derive_key_id(2, 0, 0, 0, 0), config: config.clone(), w2n_client: n_client, }; Ok(res) }*/ } impl<C, K> WalletBackend<C, K> for Backend<C, K> where C: NodeClient, K: Keychain, { /// Check whether the backend has a seed or not fn has_seed(&self) -> Result<bool> { Ok(WalletSeed::seed_file_exists(&self.config).is_err()) } /// Get the seed fn get_seed(&self) -> Result<ZeroingString> { match &self.password { Some(p) => { let seed = WalletSeed::from_file(&self.config, p)?; seed.to_mnemonic().map(|s| s.into()) } None => Err(ErrorKind::NoWallet.into()), } } /// Set a new seed, encrypt with `password` /// Should fail if backend already has a seed, /// unless `overwrite` is set to `true fn set_seed( &mut self, mnemonic: Option<ZeroingString>, password: ZeroingString, overwrite: bool, ) -> Result<()> { if self.has_seed()? && !overwrite { return Err(ErrorKind::WalletHasSeed.into()); } self.password = Some(password.clone()); let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?; Ok(()) } /// Check if the backend connection is established fn connected(&self) -> Result<bool> { Ok(self.db.is_some()) } /// Connect to the backend fn connect(&mut self) -> Result<()> { if !self.has_seed()? { return Err(ErrorKind::WalletNoSeed.into()); } if self.connected()? { return Err(ErrorKind::WalletConnected.into()); } let root_path = Path::new(&self.config.data_file_dir); let db_path = root_path.join(DB_DIR); fs::create_dir_all(&db_path)?; let stored_tx_path = root_path.join(TX_SAVE_DIR); fs::create_dir_all(&stored_tx_path)?; let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR); fs::create_dir_all(&stored_tx_proof_path)?; let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?; let default_account = AcctPathMapping { label: "default".to_string(), path: K::derive_key_id(2, 0, 0, 0, 0), }; let acct_key = to_key( ACCOUNT_PATH_MAPPING_PREFIX, &mut default_account.label.as_bytes().to_vec(), ); if !store.exists(&acct_key)?
self.db = Some(store); Ok(()) } /// Disconnect from backend fn disconnect(&mut self) -> Result<()> { self.db = None; Ok(()) } /// Set password fn set_password(&mut self, password: ZeroingString) -> Result<()> { let _ = WalletSeed::from_file(&self.config, password.deref())?; self.password = Some(password); Ok(()) } /// Clear out backend fn clear(&mut self) -> Result<()> { self.disconnect()?; let root_path = Path::new(&self.config.data_file_dir); if !root_path.exists() { return Ok(()); } let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string(); let backup_path = root_path.join("backups").join(backup_dir); fs::create_dir_all(&backup_path)?; let db_path = root_path.join(DB_DIR); if db_path.exists() { fs::rename(&db_path, &backup_path.join(DB_DIR))?; } let txs_path = root_path.join(TX_SAVE_DIR); if txs_path.exists() { fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?; } let proofs_path = root_path.join(TX_PROOF_SAVE_DIR); if proofs_path.exists() { fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?; } self.connect()?; Ok(()) } /// Initialise with whatever stored credentials we have fn open_with_credentials(&mut self) -> Result<()> { let wallet_seed = WalletSeed::from_file( &self.config, &self.password.clone().ok_or(ErrorKind::OpenWalletError)?, ) .map_err(|_| ErrorKind::OpenWalletError)?; self.keychain = Some( wallet_seed .derive_keychain(global::is_floonet()) .map_err(|_| ErrorKind::DeriveKeychainError)?, ); Ok(()) } /// Close wallet and remove any stored credentials (TBD) fn close(&mut self) -> Result<()> { self.keychain = None; Ok(()) } /// Return the keychain being used fn keychain(&mut self) -> &mut K { self.keychain.as_mut().unwrap() } /// Return the node client being used fn w2n_client(&mut self) -> &mut C { &mut self.w2n_client } /// Set parent path by account name fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> { let label = label.to_owned(); let res = self.accounts()?.find(|l| l.label == label); if let Some(a) = res { self.set_parent_key_id(&a.path); Ok(()) } else { return Err(ErrorKind::UnknownAccountLabel(label.clone()).into()); } } /// set parent path fn set_parent_key_id(&mut self, id: &Identifier) { self.parent_key_id = id.clone(); } fn get_parent_key_id(&self) -> Identifier { self.parent_key_id.clone() } fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> { let key = match mmr_index { Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i), None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()), }; option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id)) .map_err(|e| e.into()) } fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> { Ok(Box::new( self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1), )) } fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> { let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec()); self.db()?.get_ser(&key).map_err(|e| e.into()) } fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> { Ok(Box::new( self.db()? 
.iter(&[TX_LOG_ENTRY_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> { let ctx_key = to_key_u64( PRIVATE_TX_CONTEXT_PREFIX, &mut slate_id.to_vec(), participant_id as u64, ); let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?; let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || { format!("Slate id: {:x?}", slate_id.to_vec()) })?; for i in 0..SECRET_KEY_SIZE { ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i]; ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i]; } Ok(ctx) } fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> { Ok(Box::new( self.db()? .iter(&[ACCOUNT_PATH_MAPPING_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> { let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec()); let ser = self.db()?.get_ser(&acct_key)?; Ok(ser) } fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> { let filename = format!("
{ let batch = store.batch()?; batch.put_ser(&acct_key, &default_account)?; batch.commit()?; }
conditional_block
lmdb_backend.rs
None, password: Some(ZeroingString::from(password)), keychain: None, parent_key_id: K::derive_key_id(2, 0, 0, 0, 0), config: config.clone(), w2n_client: n_client, }; Ok(res) }*/ } impl<C, K> WalletBackend<C, K> for Backend<C, K> where C: NodeClient, K: Keychain, { /// Check whether the backend has a seed or not fn has_seed(&self) -> Result<bool> { Ok(WalletSeed::seed_file_exists(&self.config).is_err()) } /// Get the seed fn get_seed(&self) -> Result<ZeroingString> { match &self.password { Some(p) => { let seed = WalletSeed::from_file(&self.config, p)?; seed.to_mnemonic().map(|s| s.into()) } None => Err(ErrorKind::NoWallet.into()), } } /// Set a new seed, encrypt with `password` /// Should fail if backend already has a seed, /// unless `overwrite` is set to `true fn set_seed( &mut self, mnemonic: Option<ZeroingString>, password: ZeroingString, overwrite: bool, ) -> Result<()> { if self.has_seed()? && !overwrite { return Err(ErrorKind::WalletHasSeed.into()); } self.password = Some(password.clone()); let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?; Ok(()) } /// Check if the backend connection is established fn connected(&self) -> Result<bool> { Ok(self.db.is_some()) } /// Connect to the backend fn connect(&mut self) -> Result<()> { if !self.has_seed()? { return Err(ErrorKind::WalletNoSeed.into()); } if self.connected()? { return Err(ErrorKind::WalletConnected.into()); } let root_path = Path::new(&self.config.data_file_dir); let db_path = root_path.join(DB_DIR); fs::create_dir_all(&db_path)?; let stored_tx_path = root_path.join(TX_SAVE_DIR); fs::create_dir_all(&stored_tx_path)?; let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR); fs::create_dir_all(&stored_tx_proof_path)?; let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?; let default_account = AcctPathMapping { label: "default".to_string(), path: K::derive_key_id(2, 0, 0, 0, 0), }; let acct_key = to_key( ACCOUNT_PATH_MAPPING_PREFIX, &mut default_account.label.as_bytes().to_vec(), ); if !store.exists(&acct_key)? 
{ let batch = store.batch()?; batch.put_ser(&acct_key, &default_account)?; batch.commit()?; } self.db = Some(store); Ok(()) } /// Disconnect from backend fn disconnect(&mut self) -> Result<()> { self.db = None; Ok(()) } /// Set password fn set_password(&mut self, password: ZeroingString) -> Result<()> { let _ = WalletSeed::from_file(&self.config, password.deref())?; self.password = Some(password); Ok(()) } /// Clear out backend fn clear(&mut self) -> Result<()> { self.disconnect()?; let root_path = Path::new(&self.config.data_file_dir); if !root_path.exists() { return Ok(()); } let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string(); let backup_path = root_path.join("backups").join(backup_dir); fs::create_dir_all(&backup_path)?; let db_path = root_path.join(DB_DIR); if db_path.exists() { fs::rename(&db_path, &backup_path.join(DB_DIR))?; } let txs_path = root_path.join(TX_SAVE_DIR); if txs_path.exists() { fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?; } let proofs_path = root_path.join(TX_PROOF_SAVE_DIR); if proofs_path.exists() { fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?; } self.connect()?; Ok(()) } /// Initialise with whatever stored credentials we have fn open_with_credentials(&mut self) -> Result<()> { let wallet_seed = WalletSeed::from_file( &self.config, &self.password.clone().ok_or(ErrorKind::OpenWalletError)?, ) .map_err(|_| ErrorKind::OpenWalletError)?; self.keychain = Some( wallet_seed .derive_keychain(global::is_floonet()) .map_err(|_| ErrorKind::DeriveKeychainError)?, ); Ok(()) } /// Close wallet and remove any stored credentials (TBD) fn close(&mut self) -> Result<()> { self.keychain = None; Ok(()) } /// Return the keychain being used fn keychain(&mut self) -> &mut K { self.keychain.as_mut().unwrap() } /// Return the node client being used fn w2n_client(&mut self) -> &mut C { &mut self.w2n_client } /// Set parent path by account name fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> { let label = label.to_owned(); let res = self.accounts()?.find(|l| l.label == label); if let Some(a) = res { self.set_parent_key_id(&a.path); Ok(()) } else { return Err(ErrorKind::UnknownAccountLabel(label.clone()).into()); } } /// set parent path fn set_parent_key_id(&mut self, id: &Identifier) { self.parent_key_id = id.clone(); } fn get_parent_key_id(&self) -> Identifier { self.parent_key_id.clone() } fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> { let key = match mmr_index { Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i), None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()), }; option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id)) .map_err(|e| e.into()) } fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> { Ok(Box::new( self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1), )) } fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> { let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec()); self.db()?.get_ser(&key).map_err(|e| e.into()) } fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> { Ok(Box::new( self.db()? .iter(&[TX_LOG_ENTRY_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context>
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> { Ok(Box::new( self.db()? .iter(&[ACCOUNT_PATH_MAPPING_PREFIX]) .unwrap() .map(|x| x.1), )) } fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> { let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec()); let ser = self.db()?.get_ser(&acct_key)?; Ok(ser) } fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> { let filename = format!("
{ let ctx_key = to_key_u64( PRIVATE_TX_CONTEXT_PREFIX, &mut slate_id.to_vec(), participant_id as u64, ); let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?; let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || { format!("Slate id: {:x?}", slate_id.to_vec()) })?; for i in 0..SECRET_KEY_SIZE { ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i]; ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i]; } Ok(ctx) }
identifier_body
lib.rs
str, fields: Option<&[(&str, u32)]>,
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>; } /// A collection of methods to interact with the datapath. #[derive(Clone)] pub struct Datapath<T: Ipc> { sock_id: u32, sender: BackendSender<T>, programs: Rc<HashMap<String, Scope>>, } impl<T: Ipc> DatapathTrait for Datapath<T> { fn get_sock_id(&self) -> u32 { self.sock_id } fn set_program( &mut self, program_name: &'static str, fields: Option<&[(&str, u32)]>, ) -> Result<Scope> { // if the program with this key exists, return it; otherwise return nothing match self.programs.get(program_name) { Some(sc) => { // apply optional updates to values of registers in this scope let fields: Vec<(Reg, u64)> = fields .unwrap_or_else(|| &[]) .iter() .map(|&(reg_name, new_value)| { if reg_name.starts_with("__") { return Err(Error(format!( "Cannot update reserved field: {:?}", reg_name ))); } sc.get(reg_name) .ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name))) .and_then(|reg| match *reg { Reg::Control(idx, ref t) => { Ok((Reg::Control(idx, t.clone()), u64::from(new_value))) } Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => { Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value))) } _ => Err(Error(format!("Cannot update field: {:?}", reg_name))), }) }) .collect::<Result<_>>()?; let msg = serialize::changeprog::Msg { sid: self.sock_id, program_uid: sc.program_uid, num_fields: fields.len() as u32, fields, }; let buf = serialize::serialize(&msg)?; self.sender.send_msg(&buf[..])?; Ok(sc.clone()) } _ => Err(Error(format!( "Map does not contain datapath program with key: {:?}", program_name ))), } } fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> { let fields: Vec<(Reg, u64)> = update .iter() .map(|&(reg_name, new_value)| { if reg_name.starts_with("__") { return Err(Error(format!( "Cannot update reserved field: {:?}", reg_name ))); } sc.get(reg_name) .ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name))) .and_then(|reg| match *reg { Reg::Control(idx, ref t) => { Ok((Reg::Control(idx, t.clone()), u64::from(new_value))) } Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => { Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value))) } _ => Err(Error(format!("Cannot update field: {:?}", reg_name))), }) }) .collect::<Result<_>>()?; let msg = serialize::update_field::Msg { sid: self.sock_id, num_fields: fields.len() as u8, fields, }; let buf = serialize::serialize(&msg)?; self.sender.send_msg(&buf[..])?; Ok(()) } } fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()> where I: Ipc, { let msg = serialize::install::Msg { sid: sock_id, program_uid: sc.program_uid, num_events: bin.events.len() as u32, num_instrs: bin.instrs.len() as u32, instrs: bin, }; let buf = serialize::serialize(&msg)?; sender.send_msg(&buf[..])?; Ok(()) } /// Configuration parameters for the portus runtime. /// Defines a `slog::Logger` to use for (optional) logging #[derive(Clone, Default)] pub struct Config { pub logger: Option<slog::Logger>, } /// The set of information passed by the datapath to CCP /// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination /// IP and port), the initial congestion window (`init_cwnd`), and flow MSS. #[derive(Debug, Clone)] pub struct DatapathInfo { pub sock_id: u32, pub init_cwnd: u32, pub mss: u32, pub src_ip: u32, pub src_port: u32, pub dst_ip: u32, pub dst_port: u32, } /// Contains the values of the pre-defined Report struct from the fold function. 
/// Use `get_field` to query its values using the names defined in the fold function. pub struct Report { pub program_uid: u32, fields: Vec<u64>, } impl Report { /// Uses the `Scope` returned by `lang::compile` (or `install`) to query /// the `Report` for its values. pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> { if sc.program_uid != self.program_uid { return Err(Error::from(StaleProgramError)); } match sc.get(field) { Some(r) => match *r { Reg::Report(idx, _, _) => { if idx as usize >= self.fields.len() { Err(Error::from(InvalidReportError)) } else { Ok(self.fields[idx as usize]) } } _ => Err(Error::from(InvalidRegTypeError)), }, None => Err(Error::from(FieldNotFoundError)), } } } /// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and ///[`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control /// algorithm. /// /// * `CongAlg` implements functionality which applies to a given algorithm as a whole /// * `Flow` implements functionality specific to an individual flow /// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built /// from given command-line arguments. pub trait Flow { /// This callback specifies the algorithm's behavior when it receives a report /// of measurements from the datapath. fn on_report(&mut self, sock_id: u32, m: Report); /// Optionally specify what the algorithm should do when the flow ends, /// e.g., clean up any external resources. /// The default implementation does nothing. fn close(&mut self) {} } impl<T> Flow for Box<T> where T: Flow + ?Sized, { fn on_report(&mut self, sock_id: u32, m: Report) { T::on_report(self, sock_id, m) } fn close(&mut self) { T::close(self) } } /// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and /// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm. /// /// * `CongAlg` implements functionality which applies to a given algorithm as a whole /// * `Flow` implements functionality specific to an individual flow /// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built /// from given command-line arguments. pub trait CongAlg<I: Ipc> { /// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage /// an individual connection. type Flow: Flow; /// A unique name for the algorithm. fn name() -> &'static str; /// `datapath_programs` returns all datapath programs the congestion control algorithm /// will use during its execution. It is called once, when Portus initializes /// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)). /// /// It should return a vector of string tuples, where the first string in each tuple is a unique name /// identifying the program, and the second string is the code for the program itself. /// /// The Portus runtime will panic if any of the datapath programs do not compile. /// /// For example, /// ``` /// extern crate fnv; /// use std::collections::HashMap; /// let mut h = HashMap::new(); /// h.insert("prog1", "...(program)...".to_string()); /// h.insert("prog2", "...(program)...".to_string()); /// ``` fn datapath_programs(&self) -> HashMap<&'static str, String>; /// Create a new instance of the CongAlg to manage a new flow. /// Optionally
) -> Result<Scope>; /// Update the value of a register in an already-installed fold function.
random_line_split
lib.rs
str, fields: Option<&[(&str, u32)]>, ) -> Result<Scope>; /// Update the value of a register in an already-installed fold function. fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>; } /// A collection of methods to interact with the datapath. #[derive(Clone)] pub struct Datapath<T: Ipc> { sock_id: u32, sender: BackendSender<T>, programs: Rc<HashMap<String, Scope>>, } impl<T: Ipc> DatapathTrait for Datapath<T> { fn get_sock_id(&self) -> u32 { self.sock_id } fn set_program( &mut self, program_name: &'static str, fields: Option<&[(&str, u32)]>, ) -> Result<Scope> { // if the program with this key exists, return it; otherwise return nothing match self.programs.get(program_name) { Some(sc) => { // apply optional updates to values of registers in this scope let fields: Vec<(Reg, u64)> = fields .unwrap_or_else(|| &[]) .iter() .map(|&(reg_name, new_value)| { if reg_name.starts_with("__") { return Err(Error(format!( "Cannot update reserved field: {:?}", reg_name ))); } sc.get(reg_name) .ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name))) .and_then(|reg| match *reg { Reg::Control(idx, ref t) => { Ok((Reg::Control(idx, t.clone()), u64::from(new_value))) } Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => { Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value))) } _ => Err(Error(format!("Cannot update field: {:?}", reg_name))), }) }) .collect::<Result<_>>()?; let msg = serialize::changeprog::Msg { sid: self.sock_id, program_uid: sc.program_uid, num_fields: fields.len() as u32, fields, }; let buf = serialize::serialize(&msg)?; self.sender.send_msg(&buf[..])?; Ok(sc.clone()) } _ => Err(Error(format!( "Map does not contain datapath program with key: {:?}", program_name ))), } } fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> { let fields: Vec<(Reg, u64)> = update .iter() .map(|&(reg_name, new_value)| { if reg_name.starts_with("__") { return Err(Error(format!( "Cannot update reserved field: {:?}", reg_name ))); } sc.get(reg_name) .ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name))) .and_then(|reg| match *reg { Reg::Control(idx, ref t) => { Ok((Reg::Control(idx, t.clone()), u64::from(new_value))) } Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => { Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value))) } _ => Err(Error(format!("Cannot update field: {:?}", reg_name))), }) }) .collect::<Result<_>>()?; let msg = serialize::update_field::Msg { sid: self.sock_id, num_fields: fields.len() as u8, fields, }; let buf = serialize::serialize(&msg)?; self.sender.send_msg(&buf[..])?; Ok(()) } } fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()> where I: Ipc, { let msg = serialize::install::Msg { sid: sock_id, program_uid: sc.program_uid, num_events: bin.events.len() as u32, num_instrs: bin.instrs.len() as u32, instrs: bin, }; let buf = serialize::serialize(&msg)?; sender.send_msg(&buf[..])?; Ok(()) } /// Configuration parameters for the portus runtime. /// Defines a `slog::Logger` to use for (optional) logging #[derive(Clone, Default)] pub struct Config { pub logger: Option<slog::Logger>, } /// The set of information passed by the datapath to CCP /// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination /// IP and port), the initial congestion window (`init_cwnd`), and flow MSS. #[derive(Debug, Clone)] pub struct
{ pub sock_id: u32, pub init_cwnd: u32, pub mss: u32, pub src_ip: u32, pub src_port: u32, pub dst_ip: u32, pub dst_port: u32, } /// Contains the values of the pre-defined Report struct from the fold function. /// Use `get_field` to query its values using the names defined in the fold function. pub struct Report { pub program_uid: u32, fields: Vec<u64>, } impl Report { /// Uses the `Scope` returned by `lang::compile` (or `install`) to query /// the `Report` for its values. pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> { if sc.program_uid != self.program_uid { return Err(Error::from(StaleProgramError)); } match sc.get(field) { Some(r) => match *r { Reg::Report(idx, _, _) => { if idx as usize >= self.fields.len() { Err(Error::from(InvalidReportError)) } else { Ok(self.fields[idx as usize]) } } _ => Err(Error::from(InvalidRegTypeError)), }, None => Err(Error::from(FieldNotFoundError)), } } } /// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and ///[`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control /// algorithm. /// /// * `CongAlg` implements functionality which applies to a given algorithm as a whole /// * `Flow` implements functionality specific to an individual flow /// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built /// from given command-line arguments. pub trait Flow { /// This callback specifies the algorithm's behavior when it receives a report /// of measurements from the datapath. fn on_report(&mut self, sock_id: u32, m: Report); /// Optionally specify what the algorithm should do when the flow ends, /// e.g., clean up any external resources. /// The default implementation does nothing. fn close(&mut self) {} } impl<T> Flow for Box<T> where T: Flow + ?Sized, { fn on_report(&mut self, sock_id: u32, m: Report) { T::on_report(self, sock_id, m) } fn close(&mut self) { T::close(self) } } /// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and /// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm. /// /// * `CongAlg` implements functionality which applies to a given algorithm as a whole /// * `Flow` implements functionality specific to an individual flow /// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built /// from given command-line arguments. pub trait CongAlg<I: Ipc> { /// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage /// an individual connection. type Flow: Flow; /// A unique name for the algorithm. fn name() -> &'static str; /// `datapath_programs` returns all datapath programs the congestion control algorithm /// will use during its execution. It is called once, when Portus initializes /// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)). /// /// It should return a vector of string tuples, where the first string in each tuple is a unique name /// identifying the program, and the second string is the code for the program itself. /// /// The Portus runtime will panic if any of the datapath programs do not compile. /// /// For example, /// ``` /// extern crate fnv; /// use std::collections::HashMap; /// let mut h = HashMap::new(); /// h.insert("prog1", "...(program)...".to_string()); /// h.insert("prog2", "...(program)...".to_string()); /// ``` fn datapath_programs(&self) -> HashMap<&'static str, String>; /// Create a new instance of the CongAlg to manage a new flow. ///
DatapathInfo
identifier_name
lib.rs
str, fields: Option<&[(&str, u32)]>, ) -> Result<Scope>; /// Update the value of a register in an already-installed fold function. fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>; } /// A collection of methods to interact with the datapath. #[derive(Clone)] pub struct Datapath<T: Ipc> { sock_id: u32, sender: BackendSender<T>, programs: Rc<HashMap<String, Scope>>, } impl<T: Ipc> DatapathTrait for Datapath<T> { fn get_sock_id(&self) -> u32 { self.sock_id } fn set_program( &mut self, program_name: &'static str, fields: Option<&[(&str, u32)]>, ) -> Result<Scope> { // if the program with this key exists, return it; otherwise return nothing match self.programs.get(program_name) { Some(sc) => { // apply optional updates to values of registers in this scope let fields: Vec<(Reg, u64)> = fields .unwrap_or_else(|| &[]) .iter() .map(|&(reg_name, new_value)| { if reg_name.starts_with("__") { return Err(Error(format!( "Cannot update reserved field: {:?}", reg_name ))); } sc.get(reg_name) .ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name))) .and_then(|reg| match *reg { Reg::Control(idx, ref t) => { Ok((Reg::Control(idx, t.clone()), u64::from(new_value))) } Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => { Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value))) } _ => Err(Error(format!("Cannot update field: {:?}", reg_name))), }) }) .collect::<Result<_>>()?; let msg = serialize::changeprog::Msg { sid: self.sock_id, program_uid: sc.program_uid, num_fields: fields.len() as u32, fields, }; let buf = serialize::serialize(&msg)?; self.sender.send_msg(&buf[..])?; Ok(sc.clone()) } _ => Err(Error(format!( "Map does not contain datapath program with key: {:?}", program_name ))), } } fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> { let fields: Vec<(Reg, u64)> = update .iter() .map(|&(reg_name, new_value)| { if reg_name.starts_with("__") { return Err(Error(format!( "Cannot update reserved field: {:?}", reg_name ))); } sc.get(reg_name) .ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name))) .and_then(|reg| match *reg { Reg::Control(idx, ref t) => { Ok((Reg::Control(idx, t.clone()), u64::from(new_value))) } Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => { Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value))) } _ => Err(Error(format!("Cannot update field: {:?}", reg_name))), }) }) .collect::<Result<_>>()?; let msg = serialize::update_field::Msg { sid: self.sock_id, num_fields: fields.len() as u8, fields, }; let buf = serialize::serialize(&msg)?; self.sender.send_msg(&buf[..])?; Ok(()) } } fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()> where I: Ipc,
/// Configuration parameters for the portus runtime. /// Defines a `slog::Logger` to use for (optional) logging #[derive(Clone, Default)] pub struct Config { pub logger: Option<slog::Logger>, } /// The set of information passed by the datapath to CCP /// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination /// IP and port), the initial congestion window (`init_cwnd`), and flow MSS. #[derive(Debug, Clone)] pub struct DatapathInfo { pub sock_id: u32, pub init_cwnd: u32, pub mss: u32, pub src_ip: u32, pub src_port: u32, pub dst_ip: u32, pub dst_port: u32, } /// Contains the values of the pre-defined Report struct from the fold function. /// Use `get_field` to query its values using the names defined in the fold function. pub struct Report { pub program_uid: u32, fields: Vec<u64>, } impl Report { /// Uses the `Scope` returned by `lang::compile` (or `install`) to query /// the `Report` for its values. pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> { if sc.program_uid != self.program_uid { return Err(Error::from(StaleProgramError)); } match sc.get(field) { Some(r) => match *r { Reg::Report(idx, _, _) => { if idx as usize >= self.fields.len() { Err(Error::from(InvalidReportError)) } else { Ok(self.fields[idx as usize]) } } _ => Err(Error::from(InvalidRegTypeError)), }, None => Err(Error::from(FieldNotFoundError)), } } } /// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and ///[`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control /// algorithm. /// /// * `CongAlg` implements functionality which applies to a given algorithm as a whole /// * `Flow` implements functionality specific to an individual flow /// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built /// from given command-line arguments. pub trait Flow { /// This callback specifies the algorithm's behavior when it receives a report /// of measurements from the datapath. fn on_report(&mut self, sock_id: u32, m: Report); /// Optionally specify what the algorithm should do when the flow ends, /// e.g., clean up any external resources. /// The default implementation does nothing. fn close(&mut self) {} } impl<T> Flow for Box<T> where T: Flow + ?Sized, { fn on_report(&mut self, sock_id: u32, m: Report) { T::on_report(self, sock_id, m) } fn close(&mut self) { T::close(self) } } /// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and /// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm. /// /// * `CongAlg` implements functionality which applies to a given algorithm as a whole /// * `Flow` implements functionality specific to an individual flow /// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built /// from given command-line arguments. pub trait CongAlg<I: Ipc> { /// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage /// an individual connection. type Flow: Flow; /// A unique name for the algorithm. fn name() -> &'static str; /// `datapath_programs` returns all datapath programs the congestion control algorithm /// will use during its execution. It is called once, when Portus initializes /// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)). /// /// It should return a vector of string tuples, where the first string in each tuple is a unique name /// identifying the program, and the second string is the code for the program itself. 
/// /// The Portus runtime will panic if any of the datapath programs do not compile. /// /// For example, /// ``` /// extern crate fnv; /// use std::collections::HashMap; /// let mut h = HashMap::new(); /// h.insert("prog1", "...(program)...".to_string()); /// h.insert("prog2", "...(program)...".to_string()); /// ``` fn datapath_programs(&self) -> HashMap<&'static str, String>; /// Create a new instance of the CongAlg to manage a new flow. ///
{ let msg = serialize::install::Msg { sid: sock_id, program_uid: sc.program_uid, num_events: bin.events.len() as u32, num_instrs: bin.instrs.len() as u32, instrs: bin, }; let buf = serialize::serialize(&msg)?; sender.send_msg(&buf[..])?; Ok(()) }
identifier_body
models.py
_lazy from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ from django_prometheus.models import ExportModelOperationsMixin from guardian.mixins import GuardianUserMixin from jinja2 import Undefined from jinja2.exceptions import TemplateSyntaxError, UndefinedError from jinja2.nativetypes import NativeEnvironment from model_utils.managers import InheritanceManager from structlog import get_logger from passbook.core.exceptions import PropertyMappingExpressionException from passbook.core.signals import password_changed from passbook.core.types import UILoginButton, UIUserSettings from passbook.lib.models import CreatedUpdatedModel, UUIDModel from passbook.policies.exceptions import PolicyException from passbook.policies.types import PolicyRequest, PolicyResult LOGGER = get_logger() NATIVE_ENVIRONMENT = NativeEnvironment() def default_nonce_duration(): """Default duration a Nonce is valid""" return now() + timedelta(hours=4) class Group(ExportModelOperationsMixin("group"), UUIDModel): """Custom Group model which supports a basic hierarchy""" name = models.CharField(_("name"), max_length=80) parent = models.ForeignKey( "Group", blank=True, null=True, on_delete=models.SET_NULL, related_name="children", ) attributes = JSONField(default=dict, blank=True) def __str__(self): return f"Group {self.name}" class Meta: unique_together = (("name", "parent",),) class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser): """Custom User model to allow easier adding of user-based settings""" uuid = models.UUIDField(default=uuid4, editable=False) name = models.TextField(help_text=_("User's display name.")) sources = models.ManyToManyField("Source", through="UserSourceConnection") groups = models.ManyToManyField("Group") password_change_date = models.DateTimeField(auto_now_add=True) attributes = JSONField(default=dict, blank=True) def set_password(self, password): if self.pk: password_changed.send(sender=self, user=self, password=password) self.password_change_date = now() return super().set_password(password) class
: permissions = (("reset_user_password", "Reset Password"),) class Provider(ExportModelOperationsMixin("provider"), models.Model): """Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application""" property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) objects = InheritanceManager() # This class defines no field for easier inheritance def __str__(self): if hasattr(self, "name"): return getattr(self, "name") return super().__str__() class PolicyModel(UUIDModel, CreatedUpdatedModel): """Base model which can have policies applied to it""" policies = models.ManyToManyField("Policy", blank=True) class Factor(ExportModelOperationsMixin("factor"), PolicyModel): """Authentication factor, multiple instances of the same Factor can be used""" name = models.TextField(help_text=_("Factor's display Name.")) slug = models.SlugField( unique=True, help_text=_("Internal factor name, used in URLs.") ) order = models.IntegerField() enabled = models.BooleanField(default=True) objects = InheritanceManager() type = "" form = "" @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. Can either return None if no user settings are available, or an instance of UIUserSettings.""" return None def __str__(self): return f"Factor {self.slug}" class Application(ExportModelOperationsMixin("application"), PolicyModel): """Every Application which uses passbook for authentication/identification/authorization needs an Application record. Other authentication types can subclass this Model to add custom fields and other properties""" name = models.TextField(help_text=_("Application's display Name.")) slug = models.SlugField(help_text=_("Internal application name, used in URLs.")) skip_authorization = models.BooleanField(default=False) provider = models.OneToOneField( "Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT ) meta_launch_url = models.URLField(default="", blank=True) meta_icon_url = models.TextField(default="", blank=True) meta_description = models.TextField(default="", blank=True) meta_publisher = models.TextField(default="", blank=True) objects = InheritanceManager() def get_provider(self) -> Optional[Provider]: """Get casted provider instance""" if not self.provider: return None return Provider.objects.get_subclass(pk=self.provider.pk) def __str__(self): return self.name class Source(ExportModelOperationsMixin("source"), PolicyModel): """Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server""" name = models.TextField(help_text=_("Source's display Name.")) slug = models.SlugField(help_text=_("Internal source name, used in URLs.")) enabled = models.BooleanField(default=True) property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) form = "" # ModelForm-based class used to create/edit instance objects = InheritanceManager() @property def ui_login_button(self) -> Optional[UILoginButton]: """If source uses a http-based flow, return UI Information about the login button. If source doesn't use http-based flow, return None.""" return None @property def ui_additional_info(self) -> Optional[str]: """Return additional Info, such as a callback URL. Show in the administration interface.""" return None @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. 
Can either return None if no user settings are available, or an instance of UIUserSettings.""" return None def __str__(self): return self.name class UserSourceConnection(CreatedUpdatedModel): """Connection between User and Source.""" user = models.ForeignKey(User, on_delete=models.CASCADE) source = models.ForeignKey(Source, on_delete=models.CASCADE) class Meta: unique_together = (("user", "source"),) class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel): """Policies which specify if a user is authorized to use an Application. Can be overridden by other types to add other fields, more logic, etc.""" name = models.TextField(blank=True, null=True) negate = models.BooleanField(default=False) order = models.IntegerField(default=0) timeout = models.IntegerField(default=30) objects = InheritanceManager() def __str__(self): return f"Policy {self.name}" def passes(self, request: PolicyRequest) -> PolicyResult: """Check if user instance passes this policy""" raise PolicyException() class DebugPolicy(Policy): """Policy used for debugging the PolicyEngine. Returns a fixed result, but takes a random time to process.""" result = models.BooleanField(default=False) wait_min = models.IntegerField(default=5) wait_max = models.IntegerField(default=30) form = "passbook.core.forms.policies.DebugPolicyForm" def passes(self, request: PolicyRequest) -> PolicyResult: """Wait random time then return result""" wait = SystemRandom().randrange(self.wait_min, self.wait_max) LOGGER.debug("Policy waiting", policy=self, delay=wait) sleep(wait) return PolicyResult(self.result, "Debugging") class Meta: verbose_name = _("Debug Policy") verbose_name_plural = _("Debug Policies") class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel): """Single-use invitation link""" created_by = models.ForeignKey("User", on_delete=models.CASCADE) expires = models.DateTimeField(default=None, blank=True, null=True) fixed_username = models.TextField(blank=True, default=None) fixed_email = models.TextField(blank=True, default=None) needs_confirmation = models.BooleanField(default=True) @property def link(self): """Get link to use invitation""" return ( reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}" ) def __str__(self): return f"Invitation {self.uuid.hex} created by {self.created_by}" class Meta: verbose_name = _("Invitation") verbose_name_plural = _("Invitations") class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel): """One-time link for password resets/sign-up-confirmations""" expires = models.DateTimeField(default=default_nonce_duration) user = models.ForeignKey("User", on_delete=models.CASCADE) expiring = models.BooleanField(default=True) description = models.TextField(default="", blank=True) @property def is_expired(self) -> bool: """Check if nonce is expired yet.""" return now() > self.expires def __str__(self): return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})" class Meta: verbose_name = _("Nonce") verbose_name_plural = _("Nonces") class PropertyMapping(UUIDModel): """User-defined key -> x mapping which can be used by providers to expose extra data.""" name = models.TextField() expression = models.TextField() form = "" objects = InheritanceManager() def evaluate( self, user: Optional[User], request: Optional[HttpRequest], **kwargs ) -> Any: """Evaluate `self.expression` using `**kwargs`
Meta
identifier_name
models.py
_lazy from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ from django_prometheus.models import ExportModelOperationsMixin from guardian.mixins import GuardianUserMixin from jinja2 import Undefined from jinja2.exceptions import TemplateSyntaxError, UndefinedError from jinja2.nativetypes import NativeEnvironment from model_utils.managers import InheritanceManager from structlog import get_logger from passbook.core.exceptions import PropertyMappingExpressionException from passbook.core.signals import password_changed from passbook.core.types import UILoginButton, UIUserSettings from passbook.lib.models import CreatedUpdatedModel, UUIDModel from passbook.policies.exceptions import PolicyException from passbook.policies.types import PolicyRequest, PolicyResult LOGGER = get_logger() NATIVE_ENVIRONMENT = NativeEnvironment() def default_nonce_duration(): """Default duration a Nonce is valid""" return now() + timedelta(hours=4) class Group(ExportModelOperationsMixin("group"), UUIDModel): """Custom Group model which supports a basic hierarchy""" name = models.CharField(_("name"), max_length=80) parent = models.ForeignKey( "Group", blank=True, null=True, on_delete=models.SET_NULL, related_name="children", ) attributes = JSONField(default=dict, blank=True) def __str__(self): return f"Group {self.name}" class Meta: unique_together = (("name", "parent",),) class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser): """Custom User model to allow easier adding of user-based settings""" uuid = models.UUIDField(default=uuid4, editable=False) name = models.TextField(help_text=_("User's display name.")) sources = models.ManyToManyField("Source", through="UserSourceConnection") groups = models.ManyToManyField("Group") password_change_date = models.DateTimeField(auto_now_add=True) attributes = JSONField(default=dict, blank=True) def set_password(self, password): if self.pk: password_changed.send(sender=self, user=self, password=password) self.password_change_date = now() return super().set_password(password) class Meta: permissions = (("reset_user_password", "Reset Password"),) class Provider(ExportModelOperationsMixin("provider"), models.Model): """Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application""" property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) objects = InheritanceManager() # This class defines no field for easier inheritance def __str__(self): if hasattr(self, "name"): return getattr(self, "name") return super().__str__() class PolicyModel(UUIDModel, CreatedUpdatedModel): """Base model which can have policies applied to it""" policies = models.ManyToManyField("Policy", blank=True) class Factor(ExportModelOperationsMixin("factor"), PolicyModel): """Authentication factor, multiple instances of the same Factor can be used""" name = models.TextField(help_text=_("Factor's display Name.")) slug = models.SlugField( unique=True, help_text=_("Internal factor name, used in URLs.") ) order = models.IntegerField() enabled = models.BooleanField(default=True) objects = InheritanceManager() type = "" form = "" @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. 
Can either return None if no user settings are available, or an instanace of UIUserSettings.""" return None def __str__(self): return f"Factor {self.slug}" class Application(ExportModelOperationsMixin("application"), PolicyModel): """Every Application which uses passbook for authentication/identification/authorization needs an Application record. Other authentication types can subclass this Model to add custom fields and other properties""" name = models.TextField(help_text=_("Application's display Name.")) slug = models.SlugField(help_text=_("Internal application name, used in URLs.")) skip_authorization = models.BooleanField(default=False) provider = models.OneToOneField( "Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT ) meta_launch_url = models.URLField(default="", blank=True) meta_icon_url = models.TextField(default="", blank=True) meta_description = models.TextField(default="", blank=True) meta_publisher = models.TextField(default="", blank=True) objects = InheritanceManager() def get_provider(self) -> Optional[Provider]: """Get casted provider instance""" if not self.provider: return None return Provider.objects.get_subclass(pk=self.provider.pk) def __str__(self): return self.name class Source(ExportModelOperationsMixin("source"), PolicyModel): """Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server""" name = models.TextField(help_text=_("Source's display Name.")) slug = models.SlugField(help_text=_("Internal source name, used in URLs.")) enabled = models.BooleanField(default=True) property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) form = "" # ModelForm-based class ued to create/edit instance objects = InheritanceManager() @property def ui_login_button(self) -> Optional[UILoginButton]: """If source uses a http-based flow, return UI Information about the login button. If source doesn't use http-based flow, return None.""" return None @property def ui_additional_info(self) -> Optional[str]: """Return additional Info, such as a callback URL. Show in the administration interface.""" return None @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. Can either return None if no user settings are available, or an instanace of UIUserSettings.""" return None def __str__(self): return self.name class UserSourceConnection(CreatedUpdatedModel): """Connection between User and Source.""" user = models.ForeignKey(User, on_delete=models.CASCADE) source = models.ForeignKey(Source, on_delete=models.CASCADE) class Meta: unique_together = (("user", "source"),) class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel): """Policies which specify if a user is authorized to use an Application. Can be overridden by other types to add other fields, more logic, etc.""" name = models.TextField(blank=True, null=True) negate = models.BooleanField(default=False) order = models.IntegerField(default=0) timeout = models.IntegerField(default=30) objects = InheritanceManager() def __str__(self): return f"Policy {self.name}" def passes(self, request: PolicyRequest) -> PolicyResult:
class DebugPolicy(Policy): """Policy used for debugging the PolicyEngine. Returns a fixed result, but takes a random time to process.""" result = models.BooleanField(default=False) wait_min = models.IntegerField(default=5) wait_max = models.IntegerField(default=30) form = "passbook.core.forms.policies.DebugPolicyForm" def passes(self, request: PolicyRequest) -> PolicyResult: """Wait random time then return result""" wait = SystemRandom().randrange(self.wait_min, self.wait_max) LOGGER.debug("Policy waiting", policy=self, delay=wait) sleep(wait) return PolicyResult(self.result, "Debugging") class Meta: verbose_name = _("Debug Policy") verbose_name_plural = _("Debug Policies") class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel): """Single-use invitation link""" created_by = models.ForeignKey("User", on_delete=models.CASCADE) expires = models.DateTimeField(default=None, blank=True, null=True) fixed_username = models.TextField(blank=True, default=None) fixed_email = models.TextField(blank=True, default=None) needs_confirmation = models.BooleanField(default=True) @property def link(self): """Get link to use invitation""" return ( reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}" ) def __str__(self): return f"Invitation {self.uuid.hex} created by {self.created_by}" class Meta: verbose_name = _("Invitation") verbose_name_plural = _("Invitations") class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel): """One-time link for password resets/sign-up-confirmations""" expires = models.DateTimeField(default=default_nonce_duration) user = models.ForeignKey("User", on_delete=models.CASCADE) expiring = models.BooleanField(default=True) description = models.TextField(default="", blank=True) @property def is_expired(self) -> bool: """Check if nonce is expired yet.""" return now() > self.expires def __str__(self): return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})" class Meta: verbose_name = _("Nonce") verbose_name_plural = _("Nonces") class PropertyMapping(UUIDModel): """User-defined key -> x mapping which can be used by providers to expose extra data.""" name = models.TextField() expression = models.TextField() form = "" objects = InheritanceManager() def evaluate( self, user: Optional[User], request: Optional[HttpRequest], **kwargs ) -> Any: """Evaluate `self.expression` using `**kwargs`
"""Check if user instance passes this policy""" raise PolicyException()
identifier_body
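The Policy/DebugPolicy fragment above shows the contract: subclasses override passes() and return a PolicyResult, and Policy.objects is an InheritanceManager. A minimal sketch of a concrete subclass and of looking one up through that manager; AlwaysAllowPolicy and evaluate() are illustrative names, not code from the file.

```python
# Illustrative only -- relies on what the fragment shows: passes() returns a
# PolicyResult(bool, str) and get_subclass() is available on the manager, the same
# way Provider.objects.get_subclass is used elsewhere in the file.
class AlwaysAllowPolicy(Policy):
    def passes(self, request: PolicyRequest) -> PolicyResult:
        """Always authorize, regardless of the request."""
        return PolicyResult(True, "always allowed")


def evaluate(policy_pk, request: PolicyRequest) -> PolicyResult:
    # get_subclass returns the concrete subclass instance, so the override runs
    policy = Policy.objects.get_subclass(pk=policy_pk)
    return policy.passes(request)
```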
models.py
_lazy from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ from django_prometheus.models import ExportModelOperationsMixin from guardian.mixins import GuardianUserMixin from jinja2 import Undefined from jinja2.exceptions import TemplateSyntaxError, UndefinedError from jinja2.nativetypes import NativeEnvironment from model_utils.managers import InheritanceManager from structlog import get_logger from passbook.core.exceptions import PropertyMappingExpressionException from passbook.core.signals import password_changed from passbook.core.types import UILoginButton, UIUserSettings from passbook.lib.models import CreatedUpdatedModel, UUIDModel from passbook.policies.exceptions import PolicyException from passbook.policies.types import PolicyRequest, PolicyResult LOGGER = get_logger() NATIVE_ENVIRONMENT = NativeEnvironment() def default_nonce_duration(): """Default duration a Nonce is valid""" return now() + timedelta(hours=4) class Group(ExportModelOperationsMixin("group"), UUIDModel): """Custom Group model which supports a basic hierarchy""" name = models.CharField(_("name"), max_length=80) parent = models.ForeignKey( "Group", blank=True, null=True, on_delete=models.SET_NULL, related_name="children", ) attributes = JSONField(default=dict, blank=True) def __str__(self): return f"Group {self.name}" class Meta: unique_together = (("name", "parent",),) class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser): """Custom User model to allow easier adding o f user-based settings""" uuid = models.UUIDField(default=uuid4, editable=False) name = models.TextField(help_text=_("User's display name.")) sources = models.ManyToManyField("Source", through="UserSourceConnection") groups = models.ManyToManyField("Group") password_change_date = models.DateTimeField(auto_now_add=True) attributes = JSONField(default=dict, blank=True) def set_password(self, password): if self.pk: password_changed.send(sender=self, user=self, password=password) self.password_change_date = now() return super().set_password(password) class Meta: permissions = (("reset_user_password", "Reset Password"),) class Provider(ExportModelOperationsMixin("provider"), models.Model): """Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application""" property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) objects = InheritanceManager() # This class defines no field for easier inheritance def __str__(self): if hasattr(self, "name"): return getattr(self, "name") return super().__str__() class PolicyModel(UUIDModel, CreatedUpdatedModel): """Base model which can have policies applied to it""" policies = models.ManyToManyField("Policy", blank=True) class Factor(ExportModelOperationsMixin("factor"), PolicyModel): """Authentication factor, multiple instances of the same Factor can be used""" name = models.TextField(help_text=_("Factor's display Name.")) slug = models.SlugField( unique=True, help_text=_("Internal factor name, used in URLs.") ) order = models.IntegerField() enabled = models.BooleanField(default=True) objects = InheritanceManager() type = "" form = "" @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. 
Can either return None if no user settings are available, or an instanace of UIUserSettings.""" return None def __str__(self): return f"Factor {self.slug}" class Application(ExportModelOperationsMixin("application"), PolicyModel): """Every Application which uses passbook for authentication/identification/authorization needs an Application record. Other authentication types can subclass this Model to add custom fields and other properties""" name = models.TextField(help_text=_("Application's display Name.")) slug = models.SlugField(help_text=_("Internal application name, used in URLs.")) skip_authorization = models.BooleanField(default=False) provider = models.OneToOneField( "Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT ) meta_launch_url = models.URLField(default="", blank=True) meta_icon_url = models.TextField(default="", blank=True) meta_description = models.TextField(default="", blank=True) meta_publisher = models.TextField(default="", blank=True) objects = InheritanceManager() def get_provider(self) -> Optional[Provider]: """Get casted provider instance""" if not self.provider: return None return Provider.objects.get_subclass(pk=self.provider.pk) def __str__(self): return self.name class Source(ExportModelOperationsMixin("source"), PolicyModel): """Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server""" name = models.TextField(help_text=_("Source's display Name.")) slug = models.SlugField(help_text=_("Internal source name, used in URLs.")) enabled = models.BooleanField(default=True) property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) form = "" # ModelForm-based class ued to create/edit instance objects = InheritanceManager() @property def ui_login_button(self) -> Optional[UILoginButton]: """If source uses a http-based flow, return UI Information about the login button. If source doesn't use http-based flow, return None.""" return None @property def ui_additional_info(self) -> Optional[str]: """Return additional Info, such as a callback URL. Show in the administration interface.""" return None @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. Can either return None if no user settings are available, or an instanace of UIUserSettings.""" return None def __str__(self): return self.name class UserSourceConnection(CreatedUpdatedModel): """Connection between User and Source.""" user = models.ForeignKey(User, on_delete=models.CASCADE) source = models.ForeignKey(Source, on_delete=models.CASCADE) class Meta: unique_together = (("user", "source"),) class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel): """Policies which specify if a user is authorized to use an Application. Can be overridden by other types to add other fields, more logic, etc.""" name = models.TextField(blank=True, null=True) negate = models.BooleanField(default=False) order = models.IntegerField(default=0) timeout = models.IntegerField(default=30) objects = InheritanceManager() def __str__(self): return f"Policy {self.name}" def passes(self, request: PolicyRequest) -> PolicyResult: """Check if user instance passes this policy"""
"""Policy used for debugging the PolicyEngine. Returns a fixed result, but takes a random time to process.""" result = models.BooleanField(default=False) wait_min = models.IntegerField(default=5) wait_max = models.IntegerField(default=30) form = "passbook.core.forms.policies.DebugPolicyForm" def passes(self, request: PolicyRequest) -> PolicyResult: """Wait random time then return result""" wait = SystemRandom().randrange(self.wait_min, self.wait_max) LOGGER.debug("Policy waiting", policy=self, delay=wait) sleep(wait) return PolicyResult(self.result, "Debugging") class Meta: verbose_name = _("Debug Policy") verbose_name_plural = _("Debug Policies") class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel): """Single-use invitation link""" created_by = models.ForeignKey("User", on_delete=models.CASCADE) expires = models.DateTimeField(default=None, blank=True, null=True) fixed_username = models.TextField(blank=True, default=None) fixed_email = models.TextField(blank=True, default=None) needs_confirmation = models.BooleanField(default=True) @property def link(self): """Get link to use invitation""" return ( reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}" ) def __str__(self): return f"Invitation {self.uuid.hex} created by {self.created_by}" class Meta: verbose_name = _("Invitation") verbose_name_plural = _("Invitations") class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel): """One-time link for password resets/sign-up-confirmations""" expires = models.DateTimeField(default=default_nonce_duration) user = models.ForeignKey("User", on_delete=models.CASCADE) expiring = models.BooleanField(default=True) description = models.TextField(default="", blank=True) @property def is_expired(self) -> bool: """Check if nonce is expired yet.""" return now() > self.expires def __str__(self): return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})" class Meta: verbose_name = _("Nonce") verbose_name_plural = _("Nonces") class PropertyMapping(UUIDModel): """User-defined key -> x mapping which can be used by providers to expose extra data.""" name = models.TextField() expression = models.TextField() form = "" objects = InheritanceManager() def evaluate( self, user: Optional[User], request: Optional[HttpRequest], **kwargs ) -> Any: """Evaluate `self.expression` using `**kwargs` as
raise PolicyException() class DebugPolicy(Policy):
random_line_split
models.py
_lazy from django.utils.timezone import now from django.utils.translation import gettext_lazy as _ from django_prometheus.models import ExportModelOperationsMixin from guardian.mixins import GuardianUserMixin from jinja2 import Undefined from jinja2.exceptions import TemplateSyntaxError, UndefinedError from jinja2.nativetypes import NativeEnvironment from model_utils.managers import InheritanceManager from structlog import get_logger from passbook.core.exceptions import PropertyMappingExpressionException from passbook.core.signals import password_changed from passbook.core.types import UILoginButton, UIUserSettings from passbook.lib.models import CreatedUpdatedModel, UUIDModel from passbook.policies.exceptions import PolicyException from passbook.policies.types import PolicyRequest, PolicyResult LOGGER = get_logger() NATIVE_ENVIRONMENT = NativeEnvironment() def default_nonce_duration(): """Default duration a Nonce is valid""" return now() + timedelta(hours=4) class Group(ExportModelOperationsMixin("group"), UUIDModel): """Custom Group model which supports a basic hierarchy""" name = models.CharField(_("name"), max_length=80) parent = models.ForeignKey( "Group", blank=True, null=True, on_delete=models.SET_NULL, related_name="children", ) attributes = JSONField(default=dict, blank=True) def __str__(self): return f"Group {self.name}" class Meta: unique_together = (("name", "parent",),) class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser): """Custom User model to allow easier adding o f user-based settings""" uuid = models.UUIDField(default=uuid4, editable=False) name = models.TextField(help_text=_("User's display name.")) sources = models.ManyToManyField("Source", through="UserSourceConnection") groups = models.ManyToManyField("Group") password_change_date = models.DateTimeField(auto_now_add=True) attributes = JSONField(default=dict, blank=True) def set_password(self, password): if self.pk: password_changed.send(sender=self, user=self, password=password) self.password_change_date = now() return super().set_password(password) class Meta: permissions = (("reset_user_password", "Reset Password"),) class Provider(ExportModelOperationsMixin("provider"), models.Model): """Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application""" property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) objects = InheritanceManager() # This class defines no field for easier inheritance def __str__(self): if hasattr(self, "name"):
return super().__str__() class PolicyModel(UUIDModel, CreatedUpdatedModel): """Base model which can have policies applied to it""" policies = models.ManyToManyField("Policy", blank=True) class Factor(ExportModelOperationsMixin("factor"), PolicyModel): """Authentication factor, multiple instances of the same Factor can be used""" name = models.TextField(help_text=_("Factor's display Name.")) slug = models.SlugField( unique=True, help_text=_("Internal factor name, used in URLs.") ) order = models.IntegerField() enabled = models.BooleanField(default=True) objects = InheritanceManager() type = "" form = "" @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. Can either return None if no user settings are available, or an instanace of UIUserSettings.""" return None def __str__(self): return f"Factor {self.slug}" class Application(ExportModelOperationsMixin("application"), PolicyModel): """Every Application which uses passbook for authentication/identification/authorization needs an Application record. Other authentication types can subclass this Model to add custom fields and other properties""" name = models.TextField(help_text=_("Application's display Name.")) slug = models.SlugField(help_text=_("Internal application name, used in URLs.")) skip_authorization = models.BooleanField(default=False) provider = models.OneToOneField( "Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT ) meta_launch_url = models.URLField(default="", blank=True) meta_icon_url = models.TextField(default="", blank=True) meta_description = models.TextField(default="", blank=True) meta_publisher = models.TextField(default="", blank=True) objects = InheritanceManager() def get_provider(self) -> Optional[Provider]: """Get casted provider instance""" if not self.provider: return None return Provider.objects.get_subclass(pk=self.provider.pk) def __str__(self): return self.name class Source(ExportModelOperationsMixin("source"), PolicyModel): """Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server""" name = models.TextField(help_text=_("Source's display Name.")) slug = models.SlugField(help_text=_("Internal source name, used in URLs.")) enabled = models.BooleanField(default=True) property_mappings = models.ManyToManyField( "PropertyMapping", default=None, blank=True ) form = "" # ModelForm-based class ued to create/edit instance objects = InheritanceManager() @property def ui_login_button(self) -> Optional[UILoginButton]: """If source uses a http-based flow, return UI Information about the login button. If source doesn't use http-based flow, return None.""" return None @property def ui_additional_info(self) -> Optional[str]: """Return additional Info, such as a callback URL. Show in the administration interface.""" return None @property def ui_user_settings(self) -> Optional[UIUserSettings]: """Entrypoint to integrate with User settings. Can either return None if no user settings are available, or an instanace of UIUserSettings.""" return None def __str__(self): return self.name class UserSourceConnection(CreatedUpdatedModel): """Connection between User and Source.""" user = models.ForeignKey(User, on_delete=models.CASCADE) source = models.ForeignKey(Source, on_delete=models.CASCADE) class Meta: unique_together = (("user", "source"),) class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel): """Policies which specify if a user is authorized to use an Application. 
Can be overridden by other types to add other fields, more logic, etc.""" name = models.TextField(blank=True, null=True) negate = models.BooleanField(default=False) order = models.IntegerField(default=0) timeout = models.IntegerField(default=30) objects = InheritanceManager() def __str__(self): return f"Policy {self.name}" def passes(self, request: PolicyRequest) -> PolicyResult: """Check if user instance passes this policy""" raise PolicyException() class DebugPolicy(Policy): """Policy used for debugging the PolicyEngine. Returns a fixed result, but takes a random time to process.""" result = models.BooleanField(default=False) wait_min = models.IntegerField(default=5) wait_max = models.IntegerField(default=30) form = "passbook.core.forms.policies.DebugPolicyForm" def passes(self, request: PolicyRequest) -> PolicyResult: """Wait random time then return result""" wait = SystemRandom().randrange(self.wait_min, self.wait_max) LOGGER.debug("Policy waiting", policy=self, delay=wait) sleep(wait) return PolicyResult(self.result, "Debugging") class Meta: verbose_name = _("Debug Policy") verbose_name_plural = _("Debug Policies") class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel): """Single-use invitation link""" created_by = models.ForeignKey("User", on_delete=models.CASCADE) expires = models.DateTimeField(default=None, blank=True, null=True) fixed_username = models.TextField(blank=True, default=None) fixed_email = models.TextField(blank=True, default=None) needs_confirmation = models.BooleanField(default=True) @property def link(self): """Get link to use invitation""" return ( reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}" ) def __str__(self): return f"Invitation {self.uuid.hex} created by {self.created_by}" class Meta: verbose_name = _("Invitation") verbose_name_plural = _("Invitations") class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel): """One-time link for password resets/sign-up-confirmations""" expires = models.DateTimeField(default=default_nonce_duration) user = models.ForeignKey("User", on_delete=models.CASCADE) expiring = models.BooleanField(default=True) description = models.TextField(default="", blank=True) @property def is_expired(self) -> bool: """Check if nonce is expired yet.""" return now() > self.expires def __str__(self): return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})" class Meta: verbose_name = _("Nonce") verbose_name_plural = _("Nonces") class PropertyMapping(UUIDModel): """User-defined key -> x mapping which can be used by providers to expose extra data.""" name = models.TextField() expression = models.TextField() form = "" objects = InheritanceManager() def evaluate( self, user: Optional[User], request: Optional[HttpRequest], **kwargs ) -> Any: """Evaluate `self.expression` using `**kwargs`
return getattr(self, "name")
conditional_block
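The conditional above makes Provider.__str__ prefer a name field when a concrete subclass defines one, and fall back to the Django default otherwise. A small illustration under that assumption; SAMLProvider is a made-up subclass, not part of the source.

```python
# Illustrative only -- supplies the `name` field that Provider.__str__ probes
# with hasattr() before calling getattr(self, "name").
class SAMLProvider(Provider):
    name = models.TextField()

# str(SAMLProvider(name="corp-sso"))  -> "corp-sso"             (getattr branch)
# str(Provider())                     -> Django's default repr  (super().__str__ branch)
```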
Ui_Offer_Home.py
_16px_528836_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.Button_audit.setIcon(icon1) self.Button_audit.setObjectName("Button_audit") self.horizontalLayout.addWidget(self.Button_audit) self.Box_group = QtWidgets.QComboBox(Offer_Home) self.Box_group.setObjectName("Box_group") self.horizontalLayout.addWidget(self.Box_group) self.Box_filter = QtWidgets.QComboBox(Offer_Home) self.Box_filter.setObjectName("Box_filter") self.horizontalLayout.addWidget(self.Box_filter) self.Line_search = QtWidgets.QLineEdit(Offer_Home) self.Line_search.setText("") self.Line_search.setObjectName("Line_search") self.horizontalLayout.addWidget(self.Line_search) self.verticalLayout.addLayout(self.horizontalLayout) #连接数据库 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) cur = db.cursor() cur.execute("SELECT * FROM 报价基本信息") data = cur.fetchall() #接收全部的返回结果行 col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类 #数据的大小 row = len(data) #获得data的行数 vol = len(data[0]) #获得data的卷数.第一行的数量(列数) #插入表格 self.Widget_catalog = QTableWidget(row,vol) #目录表 self.Widget_details = QTableWidget(row,vol) #明细表 font = QtGui.QFont('微软雅黑',9) #设置字体、表头 self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体 self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题 #设置竖直方向表头不可见 # self.Widget_catalog.verticalHeader().setVisible(False) self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框 #设置表格颜色 self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单 self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽 # self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑 self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中 self.verticalLayout.addWidget(self.Widget_catalog) #构建表格插入数据 for i in range(row): #i到row-1的数量 for j in range(vol): temp_data = data[i][j] # 临时记录,不能直接插入表格 data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_catalog.setItem(i, j, data1) self.Widget_catalog.resizeColumnsToContents() #自适应宽度 self.Widget_catalog.resizeRowsToContents() #自适应行高,这两句放最后可以等数据写入后自动适应表格数据宽度 db.close cur.close #报价明细区域 # db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) conn = db.cursor() sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'" conn.execute(sql) col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类 vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0]) self.Widget_details = QTableWidget(100,vol_1) self.Widget_details.setHorizontalHeaderLabels(col_lst_1) # self.Widget_details.verticalHeader().setVisible(False) self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框 self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers) self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_details.setObjectName("报价明细") self.verticalLayout.addWidget(self.Widget_details) self.Widget_details.resizeColumnsToContents() #自适应字段宽度 db.close conn.close self.retranslateUi(Offer_Home) QtCore.QMetaObject.connectSlotsByName(Offer_Home) #测试显示报价明细 # self.Button_offernew.clicked.connect(self.querycl) self.Widget_catalog.itemClicked.connect(self.querydt) self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db # self.Button_offernew.clicked.connect(self.msg) def 
generateMenu(self, pos): row_num = -1 for i in self.Widget_catalog.selectionModel().selection().indexes(): row_num = i.row() if row_num < 2 : menu = QMenu() item1 = menu.addAction(u"通过") item2 = menu.addAction(u"未通过") action = menu.exec_(self.Widget_catalog.mapToGlobal(pos)) if action == item1: print('你选了通过') elif action == item2: print('你选了未通过') else: return def querycl(self, db): # query the quote catalog lsearch = self.Line_search.text() # search box db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) curr = db.cursor() print('you search=> '+ lsearch) sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'" curr.execute(sql) self.Widget_catalog.clearContents() data_2 = curr.fetchall() row_2 = len(data_2) # number of rows in data vol_2 = len(curr.description) # number of columns in data (cur.description / len(data[0])) # build the table and insert the data for i in range(row_2): # i runs from 0 to row-1 for
j in range(vol_3): temp_data = data_3[i][j] # temporary value; cannot be inserted into the table directly data3 = QTableWidgetItem(str(temp_data)) # converted so it can be inserted into the table self.Widget_details.setItem(i, j, data3) self.Widget_details.resizeColumnsToContents() # auto-fit column widths self.Widget_details.resizeRowsToContents() # auto-fit row heights # update data def up_data(self,cur,db): h = self.Widget_catalog.currentIndex().row() # row index h of the selected row bjdh = self.Widget_catalog.item(h, 0).text() # quote number in column 0 of the selected row h print('you bjdh=> '+ bjdh) cur.execute("UPDATE 报价基本信息 SET 状态='通过' WHERE 报价单号 = '"+bjdh+"'") db.commit() reply = QMessageBox.information(QWidget(), "标题", "审核成功" ) print( reply ) # def msg(self): # reply = QMessageBox.information(QWidget(), "标题", "对话框消息正文", QMessageBox.Yes | QMessageBox.No , QMessageBox.Yes )
identifier_body
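querycl, querydt and up_data above build their SQL by string concatenation. A minimal sketch, using only pymysql as the file already does, of the same lookup with a driver-side parameter; fetch_quote_details is a hypothetical helper name, not from the source.

```python
import pymysql

def fetch_quote_details(bjdh: str):
    """Hypothetical helper: same lookup as querydt, but with a parameterized query."""
    db = pymysql.connect(host='127.0.0.1', port=3308, user='root',
                         password='root', db='mrp', charset='utf8')
    try:
        with db.cursor() as cur:
            # the driver escapes the value bound to %s, so quoting bugs and SQL
            # injection through bjdh are avoided
            cur.execute("SELECT * FROM 报价明细 WHERE 报价单号 LIKE %s", ('%' + bjdh + '%',))
            return cur.fetchall()
    finally:
        db.close()  # note: the bare `db.close` in the original never actually closes
```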
Ui_Offer_Home.py
db='mrp',charset='utf8',) cur = db.cursor() cur.execute("SELECT * FROM 报价基本信息") data = cur.fetchall() #接收全部的返回结果行 col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类 #数据的大小 row = len(data) #获得data的行数 vol = len(data[0]) #获得data的卷数.第一行的数量(列数) #插入表格 self.Widget_catalog = QTableWidget(row,vol) #目录表 self.Widget_details = QTableWidget(row,vol) #明细表 font = QtGui.QFont('微软雅黑',9) #设置字体、表头 self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体 self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题 #设置竖直方向表头不可见 # self.Widget_catalog.verticalHeader().setVisible(False) self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框 #设置表格颜色 self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单 self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽 # self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑 self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中 self.verticalLayout.addWidget(self.Widget_catalog) #构建表格插入数据 for i in range(row): #i到row-1的数量 for j in range(vol): temp_data = data[i][j] # 临时记录,不能直接插入表格 data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_catalog.setItem(i, j, data1) self.Widget_catalog.resizeColumnsToContents() #自适应宽度 self.Widget_catalog.resizeRowsToContents() #自适应行高,这两句放最后可以等数据写入后自动适应表格数据宽度 db.close cur.close #报价明细区域 # db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) conn = db.cursor() sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'" conn.execute(sql) col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类 vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0]) self.Widget_details = QTableWidget(100,vol_1) self.Widget_details.setHorizontalHeaderLabels(col_lst_1) # self.Widget_details.verticalHeader().setVisible(False) self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框 self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers) self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_details.setObjectName("报价明细") self.verticalLayout.addWidget(self.Widget_details) self.Widget_details.resizeColumnsToContents() #自适应字段宽度 db.close conn.close self.retranslateUi(Offer_Home) QtCore.QMetaObject.connectSlotsByName(Offer_Home) #测试显示报价明细 # self.Button_offernew.clicked.connect(self.querycl) self.Widget_catalog.itemClicked.connect(self.querydt) self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db # self.Button_offernew.clicked.connect(self.msg) def generateMenu(self, pos): row_num = -1 for i in self.Widget_catalog.selectionModel().selection().indexes(): row_num = i.row() if row_num < 2 : menu = QMenu() item1 = menu.addAction(u"通过") item2 = menu.addAction(u"未通过") action = menu.exec_(self.Widget_catalog.mapToGlobal(pos)) if action == item1: print('你选了通过') elif action == item2: print('你选了未通过') else: return def querycl(self, db):#查询报价目录 lsearch = self.Line_search.text() #搜索框 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) curr = db.cursor() print('you search=> '+ lsearch) sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'" curr.execute(sql) self.Widget_catalog.clearContents() data_2 = curr.fetchall() row_2 = len(data_2) #获得data的行数 vol_2 = len(curr.description) 
#获得data的列数.cur.description len(data[0]) #构建表格插入数据 for i in range(row_2): #i到row-1的数量 for j in range(vol_2): temp_data = data_2[i][j] # 临时记录,不能直接插入表格 data2 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_catalog.setItem(i, j, data2) self.Widget_catalog.resizeColumnsToContents() #自适应宽度 self.Widget_catalog.resizeRowsToContents() #自适应行高 def querydt(self):#查询报价明细querydt(self, item) # print('you selected => '+ item.text()) # self.Line_search.setText(item.text()) #搜索框等于点击表格的值 h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) cur_3 = db.cursor() sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE '%"+bjdh+"%'" #'%"+bjdh+"%'" cur_3.execute(sql) self.Widget_details.clearContents() data_3 = cur_3.fetchall() row_3 = len(data_3) #获得data的行数 vol_3 = len(cur_3.description) #获得data的列数.cur.description len(data[0]) #构建表格插入数据 for i in range(row_3): #i到row-1的数量 for j in range(vol_3): temp_data = data_3[i][j] # 临时记录,不能直接插入表格 data3 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_details.setItem(i, j, data3) self.Widget_details.resizeColumnsToContents() #自适应宽度 self.Widget_details.resizeRowsToContents() #自适应行高 #更新数据 def up_data(self,cur,db): h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号 print('you bjdh=> '+ bjdh) cur.execute("UPDATE 报价基本信息 SET 状态='通过' WHERE 报价单号 = '"+bjdh+"'") db.commit() reply = QMessageBox.information(QWidget(), "标题", "审核成功" ) print( reply ) # def msg(self): # reply = QMessageBox.information(QWidget(), "标题", "对话框消息正文", QMessageBox.Yes | QMessageBox.No , QMessageBox.Yes ) # print( reply ) def retranslateUi(self, Offer_Home): _translate = QtCore.QCoreApplication.translate Offer_Home.setWindowTitle(_translate("Offer_Home", "报价首页")) self.Button_offernew.setText(_translate("Offer_Home", "新建")) self.Button_audit.setText(_translate("Offer_Home", "审核")) self.Box_group.setToolTip(_translate("Offer_Home", "分组")) self.Box_filter.setToolTip(_translate("Offer_Home", "筛选")) self.Line_search.setToolTip(_translate("Offer_Home", "搜索")) self.Line_search.setPlaceholderText(_translate("Offer_Home", "搜索....")) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) Offer_Home = QtWidgets.QWidget() ui = Ui_Offer_Home() ui.setupUi(Offer_Home) Offer_Home.show() sys.exit(app.exec_())
identifier_name
Ui_Offer_Home.py
.setToolTip("") self.verticalLayout = QtWidgets.QVBoxLayout(Offer_Home) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.Button_offernew = QtWidgets.QPushButton(Offer_Home) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("images/Add_16px_528841_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.Button_offernew.setIcon(icon) self.Button_offernew.setObjectName("Button_offernew") self.horizontalLayout.addWidget(self.Button_offernew) self.Button_audit = QtWidgets.QPushButton(Offer_Home) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap("images/Accept_16px_528836_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.Button_audit.setIcon(icon1) self.Button_audit.setObjectName("Button_audit") self.horizontalLayout.addWidget(self.Button_audit) self.Box_group = QtWidgets.QComboBox(Offer_Home) self.Box_group.setObjectName("Box_group") self.horizontalLayout.addWidget(self.Box_group) self.Box_filter = QtWidgets.QComboBox(Offer_Home) self.Box_filter.setObjectName("Box_filter") self.horizontalLayout.addWidget(self.Box_filter) self.Line_search = QtWidgets.QLineEdit(Offer_Home) self.Line_search.setText("") self.Line_search.setObjectName("Line_search") self.horizontalLayout.addWidget(self.Line_search) self.verticalLayout.addLayout(self.horizontalLayout) #连接数据库 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) cur = db.cursor() cur.execute("SELECT * FROM 报价基本信息") data = cur.fetchall() #接收全部的返回结果行 col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类 #数据的大小 row = len(data) #获得data的行数 vol = len(data[0]) #获得data的卷数.第一行的数量(列数) #插入表格 self.Widget_catalog = QTableWidget(row,vol) #目录表 self.Widget_details = QTableWidget(row,vol) #明细表 font = QtGui.QFont('微软雅黑',9) #设置字体、表头 self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体 self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题 #设置竖直方向表头不可见 # self.Widget_catalog.verticalHeader().setVisible(False) self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框 #设置表格颜色 self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单 self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽 # self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑 self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中 self.verticalLayout.addWidget(self.Widget_catalog) #构建表格插入数据 for i in range(row): #i到row-1的数量 for j in range(vol): temp_data = data[i][j] # 临时记录,不能直接插入表格 data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_catalog.setItem(i, j, data1) self.Widget_catalog.resizeColumnsToContents() #自适应宽度 self.Widget_catal
conn = db.cursor() sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'" conn.execute(sql) col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类 vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0]) self.Widget_details = QTableWidget(100,vol_1) self.Widget_details.setHorizontalHeaderLabels(col_lst_1) # self.Widget_details.verticalHeader().setVisible(False) self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框 self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers) self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_details.setObjectName("报价明细") self.verticalLayout.addWidget(self.Widget_details) self.Widget_details.resizeColumnsToContents() #自适应字段宽度 db.close conn.close self.retranslateUi(Offer_Home) QtCore.QMetaObject.connectSlotsByName(Offer_Home) #测试显示报价明细 # self.Button_offernew.clicked.connect(self.querycl) self.Widget_catalog.itemClicked.connect(self.querydt) self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db # self.Button_offernew.clicked.connect(self.msg) def generateMenu(self, pos): row_num = -1 for i in self.Widget_catalog.selectionModel().selection().indexes(): row_num = i.row() if row_num < 2 : menu = QMenu() item1 = menu.addAction(u"通过") item2 = menu.addAction(u"未通过") action = menu.exec_(self.Widget_catalog.mapToGlobal(pos)) if action == item1: print('你选了通过') elif action == item2: print('你选了未通过') else: return def querycl(self, db):#查询报价目录 lsearch = self.Line_search.text() #搜索框 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) curr = db.cursor() print('you search=> '+ lsearch) sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'" curr.execute(sql) self.Widget_catalog.clearContents() data_2 = curr.fetchall() row_2 = len(data_2) #获得data的行数 vol_2 = len(curr.description) #获得data的列数.cur.description len(data[0]) #构建表格插入数据 for i in range(row_2): #i到row-1的数量 for j in range(vol_2): temp_data = data_2[i][j] # 临时记录,不能直接插入表格 data2 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_catalog.setItem(i, j, data2) self.Widget_catalog.resizeColumnsToContents() #自适应宽度 self.Widget_catalog.resizeRowsToContents() #自适应行高 def querydt(self):#查询报价明细querydt(self, item) # print('you selected => '+ item.text()) # self.Line_search.setText(item.text()) #搜索框等于点击表格的值 h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) cur_3 = db.cursor() sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE '%"+bjdh+"%'" #'%"+bjdh+"%'" cur_3.execute(sql) self.Widget_details.clearContents() data_3 = cur_3.fetchall() row_3 = len(data_3) #获得data的行数 vol_3 = len(cur_3.description) #获得data的列数.cur.description len(data[0]) #构建表格插入数据 for i in range(row_3): #i到row-1的数量 for j in range(vol_3): temp_data = data_3[i][j] # 临时记录,不能直接插入表格 data3 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_details.setItem(i, j, data3) self.Widget_details.resizeColumnsToContents() #自适应宽度 self.Widget_details.resizeRowsToContents() #自适应行高
og.resizeRowsToContents() # auto-fit row heights; keeping these two calls last lets the table resize to the data after it has been written db.close() cur.close() # quote details area # db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
conditional_block
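The same two-level fill loop over rows and columns appears several times in the fragments above (setupUi, querycl, querydt). A small sketch of how it could be factored out; fill_table is a hypothetical name, and the PyQt5 calls are the ones the code already uses.

```python
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem

def fill_table(table: QTableWidget, rows) -> None:
    """Hypothetical helper: write a result set into an existing QTableWidget."""
    table.clearContents()
    for i, row in enumerate(rows):
        for j, value in enumerate(row):
            # QTableWidgetItem takes a string, hence str(), exactly as in the loops above
            table.setItem(i, j, QTableWidgetItem(str(value)))
    table.resizeColumnsToContents()  # fit column widths to the inserted data
    table.resizeRowsToContents()     # fit row heights to the inserted data
```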
Ui_Offer_Home.py
ome.setToolTip("")
self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.Button_offernew = QtWidgets.QPushButton(Offer_Home) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("images/Add_16px_528841_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.Button_offernew.setIcon(icon) self.Button_offernew.setObjectName("Button_offernew") self.horizontalLayout.addWidget(self.Button_offernew) self.Button_audit = QtWidgets.QPushButton(Offer_Home) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap("images/Accept_16px_528836_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.Button_audit.setIcon(icon1) self.Button_audit.setObjectName("Button_audit") self.horizontalLayout.addWidget(self.Button_audit) self.Box_group = QtWidgets.QComboBox(Offer_Home) self.Box_group.setObjectName("Box_group") self.horizontalLayout.addWidget(self.Box_group) self.Box_filter = QtWidgets.QComboBox(Offer_Home) self.Box_filter.setObjectName("Box_filter") self.horizontalLayout.addWidget(self.Box_filter) self.Line_search = QtWidgets.QLineEdit(Offer_Home) self.Line_search.setText("") self.Line_search.setObjectName("Line_search") self.horizontalLayout.addWidget(self.Line_search) self.verticalLayout.addLayout(self.horizontalLayout) #连接数据库 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) cur = db.cursor() cur.execute("SELECT * FROM 报价基本信息") data = cur.fetchall() #接收全部的返回结果行 col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类 #数据的大小 row = len(data) #获得data的行数 vol = len(data[0]) #获得data的卷数.第一行的数量(列数) #插入表格 self.Widget_catalog = QTableWidget(row,vol) #目录表 self.Widget_details = QTableWidget(row,vol) #明细表 font = QtGui.QFont('微软雅黑',9) #设置字体、表头 self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体 self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题 #设置竖直方向表头不可见 # self.Widget_catalog.verticalHeader().setVisible(False) self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框 #设置表格颜色 self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单 self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽 # self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑 self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中 self.verticalLayout.addWidget(self.Widget_catalog) #构建表格插入数据 for i in range(row): #i到row-1的数量 for j in range(vol): temp_data = data[i][j] # 临时记录,不能直接插入表格 data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_catalog.setItem(i, j, data1) self.Widget_catalog.resizeColumnsToContents() #自适应宽度 self.Widget_catalog.resizeRowsToContents() #自适应行高,这两句放最后可以等数据写入后自动适应表格数据宽度 db.close cur.close #报价明细区域 # db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) conn = db.cursor() sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'" conn.execute(sql) col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类 vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0]) self.Widget_details = QTableWidget(100,vol_1) self.Widget_details.setHorizontalHeaderLabels(col_lst_1) # self.Widget_details.verticalHeader().setVisible(False) self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框 self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers) 
self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}') self.Widget_details.setObjectName("报价明细") self.verticalLayout.addWidget(self.Widget_details) self.Widget_details.resizeColumnsToContents() #自适应字段宽度 db.close conn.close self.retranslateUi(Offer_Home) QtCore.QMetaObject.connectSlotsByName(Offer_Home) #测试显示报价明细 # self.Button_offernew.clicked.connect(self.querycl) self.Widget_catalog.itemClicked.connect(self.querydt) self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db # self.Button_offernew.clicked.connect(self.msg) def generateMenu(self, pos): row_num = -1 for i in self.Widget_catalog.selectionModel().selection().indexes(): row_num = i.row() if row_num < 2 : menu = QMenu() item1 = menu.addAction(u"通过") item2 = menu.addAction(u"未通过") action = menu.exec_(self.Widget_catalog.mapToGlobal(pos)) if action == item1: print('你选了通过') elif action == item2: print('你选了未通过') else: return def querycl(self, db):#查询报价目录 lsearch = self.Line_search.text() #搜索框 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) curr = db.cursor() print('you search=> '+ lsearch) sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'" curr.execute(sql) self.Widget_catalog.clearContents() data_2 = curr.fetchall() row_2 = len(data_2) #获得data的行数 vol_2 = len(curr.description) #获得data的列数.cur.description len(data[0]) #构建表格插入数据 for i in range(row_2): #i到row-1的数量 for j in range(vol_2): temp_data = data_2[i][j] # 临时记录,不能直接插入表格 data2 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_catalog.setItem(i, j, data2) self.Widget_catalog.resizeColumnsToContents() #自适应宽度 self.Widget_catalog.resizeRowsToContents() #自适应行高 def querydt(self):#查询报价明细querydt(self, item) # print('you selected => '+ item.text()) # self.Line_search.setText(item.text()) #搜索框等于点击表格的值 h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号 db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',) cur_3 = db.cursor() sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE '%"+bjdh+"%'" #'%"+bjdh+"%'" cur_3.execute(sql) self.Widget_details.clearContents() data_3 = cur_3.fetchall() row_3 = len(data_3) #获得data的行数 vol_3 = len(cur_3.description) #获得data的列数.cur.description len(data[0]) #构建表格插入数据 for i in range(row_3): #i到row-1的数量 for j in range(vol_3): temp_data = data_3[i][j] # 临时记录,不能直接插入表格 data3 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格 self.Widget_details.setItem(i, j, data3) self.Widget_details.resizeColumnsToContents() #自适应宽度 self.Widget_details.resizeRowsToContents() #自适应行高 #
self.verticalLayout = QtWidgets.QVBoxLayout(Offer_Home)
random_line_split
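The setup code above connects Button_offernew.clicked to partial(self.up_data, cur, db). A tiny, Qt-free sketch of what functools.partial contributes there: it returns a callable with cur and db pre-bound. Names and values are illustrative, not from the source.

```python
from functools import partial

def up_data(cur, db):
    # stand-in for the real slot; in the UI code cur and db are the pymysql handles
    print("would run the UPDATE using", cur, "and commit on", db)

bound = partial(up_data, "cursor-handle", "db-handle")
bound()  # -> would run the UPDATE using cursor-handle and commit on db-handle
```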
utils.rs
.to_string())) } pub fn address_to_string(address: &Address) -> String { format!("0x{}", address.encode_hex::<String>()) } #[derive(Debug, Clone, Copy)] pub enum UploadMode { Auto, Azure, Direct, } pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> { match upload_mode { "auto" => Ok(UploadMode::Auto), "azure" => Ok(UploadMode::Azure), "direct" => Ok(UploadMode::Direct), _ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()), } } #[derive(Debug, PartialEq, Clone, Copy)] pub enum ParticipationMode { Contribute, Verify, } pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> { match participation_mode { "contribute" => Ok(ParticipationMode::Contribute), "verify" => Ok(ParticipationMode::Verify), _ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()), } } fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> { let decoded = SecretVec::new(hex::decode(encrypted)?); let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?; let mut output = vec![]; if let age::Decryptor::Passphrase(decryptor) = decryptor { let mut reader = decryptor.decrypt(passphrase, None)?; reader.read_to_end(&mut output)?; } else { return Err(UtilsError::UnsupportedDecryptorError.into()); } Ok(output) } pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> { let mut encrypted_output = vec![]; let mut writer = encryptor .wrap_output(ArmoredWriter::wrap_output( &mut encrypted_output, Format::Binary, )?) .map_err(|e| match e { EncryptError::Io(e) => e, })?; std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?; writer.finish()?; let encrypted_secret = hex::encode(&encrypted_output); Ok(encrypted_secret.to_string()) } pub fn read_keys( keys_file: &str, should_use_stdin: bool, should_collect_extra_entropy: bool, ) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> { let mut contents = String::new(); { std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?; } let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?; let description = "Enter your Plumo setup passphrase"; let passphrase = if should_use_stdin { println!("{}:", description); SecretString::new(rpassword::read_password()?) } else { age::cli_common::read_secret(description, "Passphrase", None) .map_err(|_| UtilsError::CouldNotReadPassphraseError)? 
}; let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?); let plumo_private_key_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?); if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin { let description = "Enter some extra entropy (this should only be done at the first time you run the contribute binary!)"; let entered_entropy = age::cli_common::read_secret(description, "Entropy", None) .map_err(|_| UtilsError::CouldNotReadEntropyError)?; let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone()); let mut rng = OsRng; let mut extra_entropy = vec![0u8; 64]; rng.fill_bytes(&mut extra_entropy[..]); let extra_entropy = SecretVec::new(extra_entropy); let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION); hasher.update(extra_entropy.expose_secret()); hasher.update(entered_entropy.expose_secret()); let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec()); let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?; keys.encrypted_extra_entropy = Some(encrypted_extra_entropy); let mut file = OpenOptions::new().write(true).open(&keys_file)?; file.write_all(&serde_json::to_vec(&keys)?)?; file.sync_all()?; } let plumo_seed = match keys.encrypted_extra_entropy { None => plumo_seed_from_file, Some(encrypted_entropy) => { let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?); let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION); hasher.update(plumo_seed_from_file.expose_secret()); hasher.update(entropy.expose_secret()); SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec()) } }; Ok((plumo_seed, plumo_private_key_from_file, keys.attestation)) } pub fn collect_processor_data() -> Result<Vec<ProcessorData>> { cfg_if::cfg_if! { if #[cfg(not(target_arch = "aarch64"))] { use sysinfo::{ProcessorExt, System, SystemExt}; let s = System::new(); let processors = s .get_processors() .iter() .map(|p| ProcessorData { name: p.get_name().to_string(), brand: p.get_brand().to_string(), frequency: p.get_frequency().to_string(), }) .collect(); Ok(processors) } else { Ok(vec![]) } } } pub struct MaxRetriesHandler { max_attempts: usize, } impl MaxRetriesHandler { pub fn new(max_attempts: usize) -> Self { MaxRetriesHandler { max_attempts } } } impl ErrorHandler<anyhow::Error> for MaxRetriesHandler { type OutError = anyhow::Error; fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> { warn!( "Failed: {}, retry {}/{}", e.to_string(), attempt, self.max_attempts, ); if attempt >= self.max_attempts { RetryPolicy::ForwardError(e) } else { RetryPolicy::WaitRetry( chrono::Duration::seconds(5) .to_std() .expect("Should have converted to standard duration"), ) } } } pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 { parameters.accumulator_size as u64 } pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 { parameters.contribution_size as u64 } pub fn load_transcript() -> Result<Transcript> { let filename = "transcript"; if !std::path::Path::new(filename).exists() { let mut file = File::create(filename)?; file.write_all( serde_json::to_string_pretty(&Transcript { rounds: vec![], beacon_hash: None, final_hash: None, })? 
.as_bytes(), )?; } let mut file = File::open(filename)?; let mut contents = String::new(); file.read_to_string(&mut contents)?; let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?; Ok(transcript) } pub fn save_transcript(transcript: &Transcript) -> Result<()> { let filename = "transcript"; let mut file = File::create(filename)?; file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?; Ok(()) } pub fn backup_transcript(transcript: &Transcript) -> Result<()> { let filename = format!("transcript_{}", chrono::Utc::now().timestamp_nanos()); let mut file = File::create(filename)?; file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?; Ok(()) } pub fn format_attestation(attestation_message: &str, address: &str, signature: &str) -> String { format!("{} {} {}", attestation_message, address, signature) } pub fn extract_signature_from_attestation(attestation: &str) -> Result<(String, String, String)> { let attestation = attestation.to_string(); let attestation_parts = attestation.split(" ").collect::<Vec<_>>(); if attestation_parts.len() < 3 { return Err(UtilsError::AttestationTooShort(attestation_parts.len()).into()); } Ok(( attestation_parts[0..=attestation_parts.len() - 3].join(" "), attestation_parts[attestation_parts.len() - 2].to_string(), attestation_parts[attestation_parts.len() - 1].to_string(), )) } pub fn write_attestation_to_file(attestation: &Attestation, path: &str) -> Result<()>
{ File::create(path)?.write_all( format_attestation( &attestation.id, &attestation.address, &attestation.signature, ) .as_bytes(), )?; Ok(()) }
identifier_body
utils.rs
ANGE, format!("bytes={}-{}", start, end)) .header(CONTENT_LENGTH, 0) .timeout(std::time::Duration::from_secs( DEFAULT_CHUNK_TIMEOUT_IN_SECONDS, )) .send() .await? .error_for_status()?; let mut bytes = Vec::with_capacity((end - start + 1) as usize); while let Some(chunk) = resp.chunk().await? { bytes.write_all(&chunk)?; } Ok::<Vec<u8>, anyhow::Error>(bytes) } }, MaxRetriesHandler::new(DEFAULT_MAX_RETRIES), ))); } let bytes_list = futures::future::try_join_all(futures) .await? .into_iter() .collect::<Result<Vec<_>, _>>() .map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))? .into_iter() .map(|(v, _)| v); for bytes in bytes_list { out.write_all(&bytes)?; } Ok(()) } pub async fn download_file_direct_async(url: &str, file_path: &str) -> Result<()> { let url = url.to_string(); let file_path = file_path.to_string(); FutureRetry::new( || async { remove_file_if_exists(&file_path)?; let mut resp = reqwest::get(&url).await?.error_for_status()?; let mut out = File::create(&file_path)?; while let Some(chunk) = resp.chunk().await? { out.write_all(&chunk)?; } Ok(()) }, MaxRetriesHandler::new(DEFAULT_MAX_RETRIES), ) .await .map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?; Ok(()) } pub async fn upload_file_to_azure_async(file_path: &str, url: &str) -> Result<()> { upload_sas(file_path, url).await?; Ok(()) } pub async fn upload_file_to_azure_with_access_key_async( file_path: &str, access_key: &str, account: &str, container: &str, path: &str, ) -> Result<()> { upload_access_key(file_path, access_key, account, container, path).await?; Ok(()) } pub async fn upload_file_direct_async( authorization: &str, file_path: &str, url: &str, ) -> Result<()> { let mut file = File::open(file_path)?; let mut contents = Vec::new(); file.read_to_end(&mut contents)?; let client = reqwest::Client::new(); client .post(url) .header(AUTHORIZATION, authorization) .header(CONTENT_TYPE, "application/octet-stream") .body(contents) .send() .await? .error_for_status()?; Ok(()) } pub fn vrs_to_rsv(rsv: &str) -> String { format!("{}{}{}", &rsv[2..66], &rsv[66..130], &rsv[..2]) } pub fn remove_file_if_exists(file_path: &str) -> Result<()> { if Path::new(file_path).exists() { remove_file(file_path)?; } Ok(()) } pub async fn get_content_length(url: &str) -> Result<u64> { let client = reqwest::Client::new(); let result = client.head(url).send().await?.error_for_status()?; Ok(result.headers()["content-length"]
.parse::<u64>()?) } pub async fn get_ceremony(url: &str) -> Result<Ceremony> { let response = reqwest::get(url).await?.error_for_status()?; let data = response.text().await?; let ceremony: Ceremony = serde_json::from_str::<Response<Ceremony>>(&data)?.result; Ok(ceremony) } use crate::transcript_data_structs::Transcript; use blake2::{Blake2s, Digest}; use ethers::signers::{LocalWallet, Signer}; use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy}; use rand::rngs::OsRng; use rand::RngCore; pub fn verify_signed_data<T: Serialize>(data: &T, signature: &str, id: &str) -> Result<()> { let signature = Signature::from_str(&signature[2..])?; let serialized_data = serde_json::to_string(data)?; let deserialized_id = hex::decode(&id[2..])?; if deserialized_id.len() != ADDRESS_LENGTH { return Err(VerifyTranscriptError::IDWrongLength(deserialized_id.len()).into()); } let mut address = [0u8; ADDRESS_LENGTH]; address.copy_from_slice(&deserialized_id); let address = Address::from(address); signature.verify(serialized_data, address)?; Ok(()) } pub fn read_hash_from_file(file_name: &str) -> Result<String> { let mut hash = vec![]; File::open(file_name) .expect("Should have opened hash file.") .read_to_end(&mut hash) .expect("Should have read hash file."); let hash_hex = hex::encode(&hash); Ok(hash_hex) } pub fn proving_system_from_str(proving_system_str: &str) -> Result<ProvingSystem> { let proving_system = match proving_system_str { "groth16" => ProvingSystem::Groth16, "marlin" => ProvingSystem::Marlin, _ => { return Err(VerifyTranscriptError::UnsupportedProvingSystemError( proving_system_str.to_string(), ) .into()); } }; Ok(proving_system) } pub fn check_challenge_hashes_same(a: &str, b: &str) -> Result<()> { if a != b { return Err(VerifyTranscriptError::WrongChallengeHash(a.to_string(), b.to_string()).into()); } Ok(()) } pub fn check_response_hashes_same(a: &str, b: &str) -> Result<()> { if a != b { return Err(VerifyTranscriptError::WrongResponseHash(a.to_string(), b.to_string()).into()); } Ok(()) } pub fn check_new_challenge_hashes_same(a: &str, b: &str) -> Result<()> { if a != b { return Err( VerifyTranscriptError::WrongNewChallengeHash(a.to_string(), b.to_string()).into(), ); } Ok(()) } pub fn get_authorization_value( private_key: &LocalWallet, method: &str, path: &str, ) -> Result<String> { let address = private_key.address().encode_hex::<String>(); let message = format!("{} /{}", method.to_lowercase(), path.to_lowercase()); let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?; let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string()); Ok(authorization) } pub fn create_parameters_for_chunk<E: PairingEngine>( ceremony_parameters: &Parameters, chunk_index: usize, ) -> Result<Phase1Parameters<E>> { let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?; let parameters = Phase1Parameters::<E>::new_chunk( ContributionMode::Chunked, chunk_index, ceremony_parameters.chunk_size, proving_system, ceremony_parameters.power, ceremony_parameters.batch_size, ); Ok(parameters) } pub fn create_full_parameters<E: PairingEngine>( ceremony_parameters: &Parameters, ) -> Result<Phase1Parameters<E>> { let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?; let parameters = Phase1Parameters::<E>::new_full( proving_system, ceremony_parameters.power, ceremony_parameters.batch_size, ); Ok(parameters) } pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> { 
let message = serde_json::to_string(value)?; let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?; Ok(format!("0x{}", signature.to_string())) } pub fn address_to_string(address: &Address) -> String { format!("0x{}", address.encode_hex::<String>()) } #[derive(Debug, Clone, Copy)] pub enum UploadMode { Auto, Azure, Direct, } pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> { match upload_mode { "auto" => Ok(UploadMode::Auto), "azure" => Ok(UploadMode::Azure), "direct" => Ok(UploadMode::Direct), _ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()), } } #[derive(Debug, PartialEq, Clone, Copy)] pub enum ParticipationMode { Contribute, Verify, } pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> { match participation_mode { "contribute" => Ok(ParticipationMode::Contribute), "verify" => Ok(ParticipationMode::Verify), _ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()), } } fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> { let decoded = SecretVec::new(hex::decode(encrypted)?); let decryptor = age::Decrypt
.to_str()?
random_line_split
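The vrs_to_rsv helper in the record above reorders a 130-character hex signature from V||R||S to R||S||V by slicing off the 2-character recovery byte and appending it at the end. A small Go sketch of the same reordering, assuming the usual 65-byte (130 hex character) signature layout; the function name and the length check are mine:

package main

import (
	"fmt"
	"strings"
)

// vrsToRSV moves the 2-hex-char recovery value from the front to the back,
// turning V||R||S into R||S||V.
func vrsToRSV(vrs string) (string, error) {
	if len(vrs) != 130 {
		return "", fmt.Errorf("expected 130 hex characters, got %d", len(vrs))
	}
	v, r, s := vrs[:2], vrs[2:66], vrs[66:130]
	return r + s + v, nil
}

func main() {
	sig := "1b" + strings.Repeat("aa", 32) + strings.Repeat("bb", 32)
	out, err := vrsToRSV(sig)
	fmt.Println(out, err)
}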
utils.rs
message = format!("{} /{}", method.to_lowercase(), path.to_lowercase()); let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?; let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string()); Ok(authorization) } pub fn create_parameters_for_chunk<E: PairingEngine>( ceremony_parameters: &Parameters, chunk_index: usize, ) -> Result<Phase1Parameters<E>> { let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?; let parameters = Phase1Parameters::<E>::new_chunk( ContributionMode::Chunked, chunk_index, ceremony_parameters.chunk_size, proving_system, ceremony_parameters.power, ceremony_parameters.batch_size, ); Ok(parameters) } pub fn create_full_parameters<E: PairingEngine>( ceremony_parameters: &Parameters, ) -> Result<Phase1Parameters<E>> { let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?; let parameters = Phase1Parameters::<E>::new_full( proving_system, ceremony_parameters.power, ceremony_parameters.batch_size, ); Ok(parameters) } pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> { let message = serde_json::to_string(value)?; let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?; Ok(format!("0x{}", signature.to_string())) } pub fn address_to_string(address: &Address) -> String { format!("0x{}", address.encode_hex::<String>()) } #[derive(Debug, Clone, Copy)] pub enum UploadMode { Auto, Azure, Direct, } pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> { match upload_mode { "auto" => Ok(UploadMode::Auto), "azure" => Ok(UploadMode::Azure), "direct" => Ok(UploadMode::Direct), _ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()), } } #[derive(Debug, PartialEq, Clone, Copy)] pub enum ParticipationMode { Contribute, Verify, } pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> { match participation_mode { "contribute" => Ok(ParticipationMode::Contribute), "verify" => Ok(ParticipationMode::Verify), _ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()), } } fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> { let decoded = SecretVec::new(hex::decode(encrypted)?); let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?; let mut output = vec![]; if let age::Decryptor::Passphrase(decryptor) = decryptor { let mut reader = decryptor.decrypt(passphrase, None)?; reader.read_to_end(&mut output)?; } else { return Err(UtilsError::UnsupportedDecryptorError.into()); } Ok(output) } pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> { let mut encrypted_output = vec![]; let mut writer = encryptor .wrap_output(ArmoredWriter::wrap_output( &mut encrypted_output, Format::Binary, )?) 
.map_err(|e| match e { EncryptError::Io(e) => e, })?; std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?; writer.finish()?; let encrypted_secret = hex::encode(&encrypted_output); Ok(encrypted_secret.to_string()) } pub fn read_keys( keys_file: &str, should_use_stdin: bool, should_collect_extra_entropy: bool, ) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> { let mut contents = String::new(); { std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?; } let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?; let description = "Enter your Plumo setup passphrase"; let passphrase = if should_use_stdin { println!("{}:", description); SecretString::new(rpassword::read_password()?) } else { age::cli_common::read_secret(description, "Passphrase", None) .map_err(|_| UtilsError::CouldNotReadPassphraseError)? }; let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?); let plumo_private_key_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?); if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin { let description = "Enter some extra entropy (this should only be done at the first time you run the contribute binary!)"; let entered_entropy = age::cli_common::read_secret(description, "Entropy", None) .map_err(|_| UtilsError::CouldNotReadEntropyError)?; let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone()); let mut rng = OsRng; let mut extra_entropy = vec![0u8; 64]; rng.fill_bytes(&mut extra_entropy[..]); let extra_entropy = SecretVec::new(extra_entropy); let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION); hasher.update(extra_entropy.expose_secret()); hasher.update(entered_entropy.expose_secret()); let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec()); let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?; keys.encrypted_extra_entropy = Some(encrypted_extra_entropy); let mut file = OpenOptions::new().write(true).open(&keys_file)?; file.write_all(&serde_json::to_vec(&keys)?)?; file.sync_all()?; } let plumo_seed = match keys.encrypted_extra_entropy { None => plumo_seed_from_file, Some(encrypted_entropy) => { let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?); let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION); hasher.update(plumo_seed_from_file.expose_secret()); hasher.update(entropy.expose_secret()); SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec()) } }; Ok((plumo_seed, plumo_private_key_from_file, keys.attestation)) } pub fn collect_processor_data() -> Result<Vec<ProcessorData>> { cfg_if::cfg_if! 
{ if #[cfg(not(target_arch = "aarch64"))] { use sysinfo::{ProcessorExt, System, SystemExt}; let s = System::new(); let processors = s .get_processors() .iter() .map(|p| ProcessorData { name: p.get_name().to_string(), brand: p.get_brand().to_string(), frequency: p.get_frequency().to_string(), }) .collect(); Ok(processors) } else { Ok(vec![]) } } } pub struct MaxRetriesHandler { max_attempts: usize, } impl MaxRetriesHandler { pub fn new(max_attempts: usize) -> Self { MaxRetriesHandler { max_attempts } } } impl ErrorHandler<anyhow::Error> for MaxRetriesHandler { type OutError = anyhow::Error; fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> { warn!( "Failed: {}, retry {}/{}", e.to_string(), attempt, self.max_attempts, ); if attempt >= self.max_attempts { RetryPolicy::ForwardError(e) } else { RetryPolicy::WaitRetry( chrono::Duration::seconds(5) .to_std() .expect("Should have converted to standard duration"), ) } } } pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 { parameters.accumulator_size as u64 } pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 { parameters.contribution_size as u64 } pub fn load_transcript() -> Result<Transcript> { let filename = "transcript"; if !std::path::Path::new(filename).exists() { let mut file = File::create(filename)?; file.write_all( serde_json::to_string_pretty(&Transcript { rounds: vec![], beacon_hash: None, final_hash: None, })? .as_bytes(), )?; } let mut file = File::open(filename)?; let mut contents = String::new(); file.read_to_string(&mut contents)?; let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?; Ok(transcript) } pub fn save_transcript(transcript: &Transcript) -> Result<()> { let filename = "transcript"; let mut file = File::create(filename)?; file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?; Ok(()) } pub fn
backup_transcript
identifier_name
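MaxRetriesHandler in the record above retries a failed operation, logging each failure and sleeping a fixed five seconds between attempts until the attempt budget is exhausted, at which point the last error is forwarded. A minimal Go sketch of that bounded-retry policy using only the standard library; the helper name is illustrative:

package main

import (
	"errors"
	"fmt"
	"log"
	"time"
)

// withRetries runs op up to maxAttempts times, waiting five seconds between
// failures, and returns the last error once the budget is spent.
func withRetries(maxAttempts int, op func() error) error {
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		log.Printf("Failed: %v, retry %d/%d", err, attempt, maxAttempts)
		if attempt < maxAttempts {
			time.Sleep(5 * time.Second)
		}
	}
	return err
}

func main() {
	err := withRetries(3, func() error { return errors.New("transient failure") })
	fmt.Println("final:", err)
}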
schema.py
=_( "Name of the source system (e.g. 'myhelsinki', 'ahti', " "'ulkoliikuntakartta', 'digitransit')" ), ) type = graphene.String( required=True, description=_( "Type of the feature in the source system, if applicable (e.g. 'place', " "'activity', 'event', 'route')" ), ) id = graphene.String( required=True, description="ID of the current feature in source system" ) class PriceTag(DjangoObjectType): """An item displayed in a price list.""" class Meta: model = models.PriceTag fields = ("price",) item = graphene.String(required=True, description=_("Name of the item")) price = graphene.Decimal(required=True, description=_("Price of the item in EUR")) unit = graphene.String( description=_( "Unit of the price (e.g. 'hour', 'day', 'piece', 'person', 'child', " "'one way')" ), ) class Teaser(DjangoObjectType): """Simple content element (e.g. something special about a feature).""" class Meta: model = models.FeatureTeaser fields = () # Don't include any fields from the model automatically header = graphene.String( description=_("An opening, e.g. 'Starting' from 'Starting from 7€/day.'") ) main = graphene.String(description=_("The meat of the deal, '7€/day' part")) class FeatureTranslations(DjangoObjectType): "Values in other languages for the feature attributes that can have translations." language_code = LanguageEnum(required=True) class Meta: model = apps.get_model("features", "FeatureTranslation") exclude = ("id", "master") class Image(DjangoObjectType): class Meta: model = models.Image fields = ( "url", "copyright_owner", "license", ) class License(DjangoObjectType): class Meta: model = models.License fields = ("id",) name = graphene.String(required=True, description=_("Display name of the license")) class Tag(
ngoObjectType): """Tags are associated with things (like features).""" class Meta: model = models.Tag fields = ("id", "features") name = graphene.String(required=True, description=_("Display name of the tag")) class OpeningHoursPeriod(DjangoObjectType): """A period during which certain opening hours are valid.""" class Meta: model = models.OpeningHoursPeriod fields = ( "valid_from", "valid_to", "opening_hours", ) comment = graphene.String( description=_( "Comment for this opening hour period (e.g. 'Exceptional opening hours " "during Midsummer')" ), ) class OpeningHours(DjangoObjectType): """The daily opening hours / hours of operation of something.""" class Meta: model = models.OpeningHours fields = ( "opens", "closes", "all_day", ) day = WeekdayEnum(required=True, description=_("Day of week")) class Depth(ObjectType): """The depth of something, in meters. Can be a single value (min and max are equal) or a range. (Consider: harbor/lake/pool/mineshaft)." """ min = graphene.Float( required=True, description=_( "An approximation of the minimum depth (or lower end of the range)" ), ) max = graphene.Float( required=True, description=_( "An approximation of the maximum depth (or deeper end of the range)" ), ) class HarborDetails(ObjectType): """Information specific to harbors (and piers).""" moorings = graphene.List( graphene.NonNull(HarborMooringTypeEnum), description=_("Mooring types available in the harbor"), ) depth = graphene.Field( Depth, description=_("Approximate depth of the harbor, in meters") ) def resolve_moorings(self: models.FeatureDetails, info, **kwargs): return self.data["berth_moorings"] def resolve_depth(self: models.FeatureDetails, info, **kwargs): """Minimum depth is mandatory, maximum is included for a range.""" min = self.data.get("berth_min_depth") max = self.data.get("berth_max_depth") if min is None: return None return { "min": min, "max": max, } class FeatureDetails(ObjectType): """Detailed information a feature might have.""" harbor = graphene.Field(HarborDetails, description=_("Details of a harbor")) price_list = graphene.List( "features.schema.PriceTag", required=True, description=_("Price list related to a feature"), ) class FeatureFilter(django_filters.FilterSet): """Contains the filters to use when retrieving features.""" class Meta: model = models.Feature fields = [ "distance_lte", "updated_since", "tagged_with_any", "tagged_with_all", "category", ] distance_lte = DistanceFilter( field_name="geometry", lookup_expr="distance_lte", label=_("Fetch features within a given distance from the given geometry"), ) updated_since = django_filters.IsoDateTimeFilter( method="filter_updated_since", label=_("Fetch features that have changed since specified timestamp"), ) tagged_with_any = StringListFilter( method="filter_tagged_with_any", label=_("Fetch features tagged with any of the specified tags (ids)"), ) tagged_with_all = StringListFilter( method="filter_tagged_with_all", label=_("Fetch features tagged with all of the specified tags (ids)"), ) category = StringListFilter( method="filter_category", label=_("Fetch features from included categories") ) def filter_updated_since(self, queryset, name, value): return queryset.filter( Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value) ).distinct() # Distinct because filtering on ForeignKey relation. def filter_tagged_with_any(self, queryset, name, value): return queryset.filter( tags__in=value ).distinct() # Distinct because filtering on ForeignKey relation. 
def filter_tagged_with_all(self, queryset, name, value): for v in value: queryset = queryset.filter(tags=v) return queryset def filter_category(self, queryset, name, value): return queryset.filter(category__in=value) class Feature(graphql_geojson.GeoJSONType): """Features in Ahti are structured according to GeoJSON specification. All Ahti specific attributes are contained within attribute `properties`. **Note!** `Feature.type` always has the value `Feature`. """ class Meta: fields = ( "id", "category", "created_at", "contact_info", "teaser", "details", "geometry", "images", "links", "opening_hours_periods", "tags", "translations", ) filterset_class = FeatureFilter model = models.Feature geojson_field = "geometry" interfaces = (relay.Node,) ahti_id = graphene.String( required=True, description=_( "Human readable ID. Format examples: " "'ahti:feature:12C4' or 'myhelsinki:place:5678'" ), ) source = graphene.Field( FeatureSource, required=True, description=_("Source of the feature") ) name = graphene.String(required=True, description=_("Name of the feature")) one_liner = graphene.String( required=True, description=_("Short introductory text or a tagline") ) description = graphene.String(description=_("Description of the feature")) details = graphene.Field( FeatureDetails, description=_("Detailed information a feature might have") ) url = graphene.String(description=_("URL for more information about this feature")) modified_at = graphene.DateTime(required=True) parents = graphene.List( "features.schema.Feature", required=True, description=_("Parents of this feature"), ) children = graphene.List( "features.schema.Feature", required=True, description=_( "Children of this feature (ex. stops along a route, piers of a harbor etc.)" ), ) def resolve_source(self: models.Feature, info, **kwargs): return { "system": self.source_type.system, "type": self.source_type.type, "id": self.source_id, } def resolve_name(self: models.Feature, info, **kwargs): name_override = self.overrides.filter(field=OverrideFieldType.NAME).first() if name_override: return name_override.value return self.name def resolve_modified_at(self: models.Feature, info, **kwargs): latest_override = self.overrides.order_by("-modified_at").first() return ( max(self.source_modified_at, latest_override.modified_at) if latest_override else self.source_modified_at ) def resolve_details(self: models.Feature, info, **kwargs): details = {} for detail in self.details.all(): # Default dict resolver will resolve this for FeatureDetails details[detail.type.lower()] = detail # PriceTags have a relation to Feature model, so we resolve it separately details["price_list"] = self.price_tags.all() return details if details else None def resolve_parents(self
Dja
identifier_name
schema.py
minimum depth (or lower end of the range)" ), ) max = graphene.Float( required=True, description=_( "An approximation of the maximum depth (or deeper end of the range)" ), ) class HarborDetails(ObjectType): """Information specific to harbors (and piers).""" moorings = graphene.List( graphene.NonNull(HarborMooringTypeEnum), description=_("Mooring types available in the harbor"), ) depth = graphene.Field( Depth, description=_("Approximate depth of the harbor, in meters") ) def resolve_moorings(self: models.FeatureDetails, info, **kwargs): return self.data["berth_moorings"] def resolve_depth(self: models.FeatureDetails, info, **kwargs): """Minimum depth is mandatory, maximum is included for a range.""" min = self.data.get("berth_min_depth") max = self.data.get("berth_max_depth") if min is None: return None return { "min": min, "max": max, } class FeatureDetails(ObjectType): """Detailed information a feature might have.""" harbor = graphene.Field(HarborDetails, description=_("Details of a harbor")) price_list = graphene.List( "features.schema.PriceTag", required=True, description=_("Price list related to a feature"), ) class FeatureFilter(django_filters.FilterSet): """Contains the filters to use when retrieving features.""" class Meta: model = models.Feature fields = [ "distance_lte", "updated_since", "tagged_with_any", "tagged_with_all", "category", ] distance_lte = DistanceFilter( field_name="geometry", lookup_expr="distance_lte", label=_("Fetch features within a given distance from the given geometry"), ) updated_since = django_filters.IsoDateTimeFilter( method="filter_updated_since", label=_("Fetch features that have changed since specified timestamp"), ) tagged_with_any = StringListFilter( method="filter_tagged_with_any", label=_("Fetch features tagged with any of the specified tags (ids)"), ) tagged_with_all = StringListFilter( method="filter_tagged_with_all", label=_("Fetch features tagged with all of the specified tags (ids)"), ) category = StringListFilter( method="filter_category", label=_("Fetch features from included categories") ) def filter_updated_since(self, queryset, name, value): return queryset.filter( Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value) ).distinct() # Distinct because filtering on ForeignKey relation. def filter_tagged_with_any(self, queryset, name, value): return queryset.filter( tags__in=value ).distinct() # Distinct because filtering on ForeignKey relation. def filter_tagged_with_all(self, queryset, name, value): for v in value: queryset = queryset.filter(tags=v) return queryset def filter_category(self, queryset, name, value): return queryset.filter(category__in=value) class Feature(graphql_geojson.GeoJSONType): """Features in Ahti are structured according to GeoJSON specification. All Ahti specific attributes are contained within attribute `properties`. **Note!** `Feature.type` always has the value `Feature`. """ class Meta: fields = ( "id", "category", "created_at", "contact_info", "teaser", "details", "geometry", "images", "links", "opening_hours_periods", "tags", "translations", ) filterset_class = FeatureFilter model = models.Feature geojson_field = "geometry" interfaces = (relay.Node,) ahti_id = graphene.String( required=True, description=_( "Human readable ID. 
Format examples: " "'ahti:feature:12C4' or 'myhelsinki:place:5678'" ), ) source = graphene.Field( FeatureSource, required=True, description=_("Source of the feature") ) name = graphene.String(required=True, description=_("Name of the feature")) one_liner = graphene.String( required=True, description=_("Short introductory text or a tagline") ) description = graphene.String(description=_("Description of the feature")) details = graphene.Field( FeatureDetails, description=_("Detailed information a feature might have") ) url = graphene.String(description=_("URL for more information about this feature")) modified_at = graphene.DateTime(required=True) parents = graphene.List( "features.schema.Feature", required=True, description=_("Parents of this feature"), ) children = graphene.List( "features.schema.Feature", required=True, description=_( "Children of this feature (ex. stops along a route, piers of a harbor etc.)" ), ) def resolve_source(self: models.Feature, info, **kwargs): return { "system": self.source_type.system, "type": self.source_type.type, "id": self.source_id, } def resolve_name(self: models.Feature, info, **kwargs): name_override = self.overrides.filter(field=OverrideFieldType.NAME).first() if name_override: return name_override.value return self.name def resolve_modified_at(self: models.Feature, info, **kwargs): latest_override = self.overrides.order_by("-modified_at").first() return ( max(self.source_modified_at, latest_override.modified_at) if latest_override else self.source_modified_at ) def resolve_details(self: models.Feature, info, **kwargs): details = {} for detail in self.details.all(): # Default dict resolver will resolve this for FeatureDetails details[detail.type.lower()] = detail # PriceTags have a relation to Feature model, so we resolve it separately details["price_list"] = self.price_tags.all() return details if details else None def resolve_parents(self: models.Feature, info, **kwargs): return self.parents.all() def resolve_children(self: models.Feature, info, **kwargs): return self.children.all() @classmethod def get_queryset(cls, queryset, info): return ( queryset.filter(visibility=Visibility.VISIBLE) .select_related("source_type", "category", "teaser") .prefetch_related( "category__translations", "contact_info", "children", "details", "price_tags", "price_tags__translations", "images", "images__license", "images__license__translations", "links", "opening_hours_periods", "opening_hours_periods__opening_hours", "opening_hours_periods__translations", "parents", "tags", "tags__translations", "teaser__translations", "translations", ) ) class FeatureTranslationsInput(graphene.InputObjectType): language_code = LanguageEnum(required=True) name = graphene.String(required=True, description=_("Name of the feature")) description = graphene.String(description=_("Description of the feature")) url = graphene.String(description=_("URL for more information about this feature")) one_liner = graphene.String(description=_("Short introductory text or a tagline")) class ContactInfoInput(graphene.InputObjectType): street_address = graphene.String() postal_code = graphene.String() municipality = graphene.String() phone_number = graphene.String() email = graphene.String() class CreateFeatureMutation(relay.ClientIDMutation): class Input: translations = graphene.List( graphene.NonNull(FeatureTranslationsInput), required=True ) geometry = graphql_geojson.Geometry(required=True) contact_info = ContactInfoInput() category_id = graphene.String() tag_ids = graphene.List(graphene.String) 
feature = graphene.Field(Feature) @classmethod def get_source_type(cls): st, created = models.SourceType.objects.get_or_create(system="ahti", type="api") return st @classmethod @transaction.atomic def mutate_and_get_payload(cls, root, info, **kwargs): contact_info_values = kwargs.pop("contact_info", None) tag_ids = kwargs.pop("tag_ids", None) category_id = kwargs.pop("category_id", None) now = timezone.now() values = { "source_type": cls.get_source_type(), "source_id": uuid.uuid4(), "source_modified_at": now, "mapped_at": now, "visibility": Visibility.DRAFT, } values.update(kwargs) if category_id: values["category"] = Category.objects.get(id=category_id) if tag_ids: tags = [models.Tag.objects.get(id=tag_id) for tag_id in tag_ids] else: tags = [] feature = models.Feature.objects.create_translatable_object(**values) if contact_info_values: ci = models.ContactInfo.objects.create( feature=feature, **contact_info_values ) ci.full_clean() ci.save() if tags: feature.tags.set(tags) return CreateFeatureMutation(feature=feature) class Query(graphene.ObjectType): features = DjangoFilterConnectionField( Feature, description=_("Retrieve all features matching the given filters") ) feature = graphene.Field( Feature, id=ID(description=_("The ID of the object")),
ahti_id=String(description=_("Ahti ID of the object")), description=_("Retrieve a single feature"), ) tags = graphene.List(Tag, description=_("Retrieve all tags"))
random_line_split
schema.py
) main = graphene.String(description=_("The meat of the deal, '7€/day' part")) class FeatureTranslations(DjangoObjectType): "Values in other languages for the feature attributes that can have translations." language_code = LanguageEnum(required=True) class Meta: model = apps.get_model("features", "FeatureTranslation") exclude = ("id", "master") class Image(DjangoObjectType): class Meta: model = models.Image fields = ( "url", "copyright_owner", "license", ) class License(DjangoObjectType): class Meta: model = models.License fields = ("id",) name = graphene.String(required=True, description=_("Display name of the license")) class Tag(DjangoObjectType): """Tags are associated with things (like features).""" class Meta: model = models.Tag fields = ("id", "features") name = graphene.String(required=True, description=_("Display name of the tag")) class OpeningHoursPeriod(DjangoObjectType): """A period during which certain opening hours are valid.""" class Meta: model = models.OpeningHoursPeriod fields = ( "valid_from", "valid_to", "opening_hours", ) comment = graphene.String( description=_( "Comment for this opening hour period (e.g. 'Exceptional opening hours " "during Midsummer')" ), ) class OpeningHours(DjangoObjectType): """The daily opening hours / hours of operation of something.""" class Meta: model = models.OpeningHours fields = ( "opens", "closes", "all_day", ) day = WeekdayEnum(required=True, description=_("Day of week")) class Depth(ObjectType): """The depth of something, in meters. Can be a single value (min and max are equal) or a range. (Consider: harbor/lake/pool/mineshaft)." """ min = graphene.Float( required=True, description=_( "An approximation of the minimum depth (or lower end of the range)" ), ) max = graphene.Float( required=True, description=_( "An approximation of the maximum depth (or deeper end of the range)" ), ) class HarborDetails(ObjectType): """Information specific to harbors (and piers).""" moorings = graphene.List( graphene.NonNull(HarborMooringTypeEnum), description=_("Mooring types available in the harbor"), ) depth = graphene.Field( Depth, description=_("Approximate depth of the harbor, in meters") ) def resolve_moorings(self: models.FeatureDetails, info, **kwargs): return self.data["berth_moorings"] def resolve_depth(self: models.FeatureDetails, info, **kwargs): """Minimum depth is mandatory, maximum is included for a range.""" min = self.data.get("berth_min_depth") max = self.data.get("berth_max_depth") if min is None: return None return { "min": min, "max": max, } class FeatureDetails(ObjectType): """Detailed information a feature might have.""" harbor = graphene.Field(HarborDetails, description=_("Details of a harbor")) price_list = graphene.List( "features.schema.PriceTag", required=True, description=_("Price list related to a feature"), ) class FeatureFilter(django_filters.FilterSet): """Contains the filters to use when retrieving features.""" class Meta: model = models.Feature fields = [ "distance_lte", "updated_since", "tagged_with_any", "tagged_with_all", "category", ] distance_lte = DistanceFilter( field_name="geometry", lookup_expr="distance_lte", label=_("Fetch features within a given distance from the given geometry"), ) updated_since = django_filters.IsoDateTimeFilter( method="filter_updated_since", label=_("Fetch features that have changed since specified timestamp"), ) tagged_with_any = StringListFilter( method="filter_tagged_with_any", label=_("Fetch features tagged with any of the specified tags (ids)"), ) tagged_with_all = StringListFilter( 
method="filter_tagged_with_all", label=_("Fetch features tagged with all of the specified tags (ids)"), ) category = StringListFilter( method="filter_category", label=_("Fetch features from included categories") ) def filter_updated_since(self, queryset, name, value): return queryset.filter( Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value) ).distinct() # Distinct because filtering on ForeignKey relation. def filter_tagged_with_any(self, queryset, name, value): return queryset.filter( tags__in=value ).distinct() # Distinct because filtering on ForeignKey relation. def filter_tagged_with_all(self, queryset, name, value): for v in value: queryset = queryset.filter(tags=v) return queryset def filter_category(self, queryset, name, value): return queryset.filter(category__in=value) class Feature(graphql_geojson.GeoJSONType): """Features in Ahti are structured according to GeoJSON specification. All Ahti specific attributes are contained within attribute `properties`. **Note!** `Feature.type` always has the value `Feature`. """ class Meta: fields = ( "id", "category", "created_at", "contact_info", "teaser", "details", "geometry", "images", "links", "opening_hours_periods", "tags", "translations", ) filterset_class = FeatureFilter model = models.Feature geojson_field = "geometry" interfaces = (relay.Node,) ahti_id = graphene.String( required=True, description=_( "Human readable ID. Format examples: " "'ahti:feature:12C4' or 'myhelsinki:place:5678'" ), ) source = graphene.Field( FeatureSource, required=True, description=_("Source of the feature") ) name = graphene.String(required=True, description=_("Name of the feature")) one_liner = graphene.String( required=True, description=_("Short introductory text or a tagline") ) description = graphene.String(description=_("Description of the feature")) details = graphene.Field( FeatureDetails, description=_("Detailed information a feature might have") ) url = graphene.String(description=_("URL for more information about this feature")) modified_at = graphene.DateTime(required=True) parents = graphene.List( "features.schema.Feature", required=True, description=_("Parents of this feature"), ) children = graphene.List( "features.schema.Feature", required=True, description=_( "Children of this feature (ex. 
stops along a route, piers of a harbor etc.)" ), ) def resolve_source(self: models.Feature, info, **kwargs): return { "system": self.source_type.system, "type": self.source_type.type, "id": self.source_id, } def resolve_name(self: models.Feature, info, **kwargs): name_override = self.overrides.filter(field=OverrideFieldType.NAME).first() if name_override: return name_override.value return self.name def resolve_modified_at(self: models.Feature, info, **kwargs): latest_override = self.overrides.order_by("-modified_at").first() return ( max(self.source_modified_at, latest_override.modified_at) if latest_override else self.source_modified_at ) def resolve_details(self: models.Feature, info, **kwargs): details = {} for detail in self.details.all(): # Default dict resolver will resolve this for FeatureDetails details[detail.type.lower()] = detail # PriceTags have a relation to Feature model, so we resolve it separately details["price_list"] = self.price_tags.all() return details if details else None def resolve_parents(self: models.Feature, info, **kwargs): return self.parents.all() def resolve_children(self: models.Feature, info, **kwargs): return self.children.all() @classmethod def get_queryset(cls, queryset, info): return ( queryset.filter(visibility=Visibility.VISIBLE) .select_related("source_type", "category", "teaser") .prefetch_related( "category__translations", "contact_info", "children", "details", "price_tags", "price_tags__translations", "images", "images__license", "images__license__translations", "links", "opening_hours_periods", "opening_hours_periods__opening_hours", "opening_hours_periods__translations", "parents", "tags", "tags__translations", "teaser__translations", "translations", ) ) class FeatureTranslationsInput(graphene.InputObjectType): language_code = LanguageEnum(required=True) name = graphene.String(required=True, description=_("Name of the feature")) description = graphene.String(description=_("Description of the feature")) url = graphene.String(description=_("URL for more information about this feature")) one_liner = graphene.String(description=_("Short introductory text or a tagline")) class ContactInfoInput(graphene.InputObjectType): stre
et_address = graphene.String() postal_code = graphene.String() municipality = graphene.String() phone_number = graphene.String() email = graphene.String() c
identifier_body
schema.py
), ) class HarborDetails(ObjectType): """Information specific to harbors (and piers).""" moorings = graphene.List( graphene.NonNull(HarborMooringTypeEnum), description=_("Mooring types available in the harbor"), ) depth = graphene.Field( Depth, description=_("Approximate depth of the harbor, in meters") ) def resolve_moorings(self: models.FeatureDetails, info, **kwargs): return self.data["berth_moorings"] def resolve_depth(self: models.FeatureDetails, info, **kwargs): """Minimum depth is mandatory, maximum is included for a range.""" min = self.data.get("berth_min_depth") max = self.data.get("berth_max_depth") if min is None: return None return { "min": min, "max": max, } class FeatureDetails(ObjectType): """Detailed information a feature might have.""" harbor = graphene.Field(HarborDetails, description=_("Details of a harbor")) price_list = graphene.List( "features.schema.PriceTag", required=True, description=_("Price list related to a feature"), ) class FeatureFilter(django_filters.FilterSet): """Contains the filters to use when retrieving features.""" class Meta: model = models.Feature fields = [ "distance_lte", "updated_since", "tagged_with_any", "tagged_with_all", "category", ] distance_lte = DistanceFilter( field_name="geometry", lookup_expr="distance_lte", label=_("Fetch features within a given distance from the given geometry"), ) updated_since = django_filters.IsoDateTimeFilter( method="filter_updated_since", label=_("Fetch features that have changed since specified timestamp"), ) tagged_with_any = StringListFilter( method="filter_tagged_with_any", label=_("Fetch features tagged with any of the specified tags (ids)"), ) tagged_with_all = StringListFilter( method="filter_tagged_with_all", label=_("Fetch features tagged with all of the specified tags (ids)"), ) category = StringListFilter( method="filter_category", label=_("Fetch features from included categories") ) def filter_updated_since(self, queryset, name, value): return queryset.filter( Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value) ).distinct() # Distinct because filtering on ForeignKey relation. def filter_tagged_with_any(self, queryset, name, value): return queryset.filter( tags__in=value ).distinct() # Distinct because filtering on ForeignKey relation. def filter_tagged_with_all(self, queryset, name, value): for v in value: queryset = queryset.filter(tags=v) return queryset def filter_category(self, queryset, name, value): return queryset.filter(category__in=value) class Feature(graphql_geojson.GeoJSONType): """Features in Ahti are structured according to GeoJSON specification. All Ahti specific attributes are contained within attribute `properties`. **Note!** `Feature.type` always has the value `Feature`. """ class Meta: fields = ( "id", "category", "created_at", "contact_info", "teaser", "details", "geometry", "images", "links", "opening_hours_periods", "tags", "translations", ) filterset_class = FeatureFilter model = models.Feature geojson_field = "geometry" interfaces = (relay.Node,) ahti_id = graphene.String( required=True, description=_( "Human readable ID. 
Format examples: " "'ahti:feature:12C4' or 'myhelsinki:place:5678'" ), ) source = graphene.Field( FeatureSource, required=True, description=_("Source of the feature") ) name = graphene.String(required=True, description=_("Name of the feature")) one_liner = graphene.String( required=True, description=_("Short introductory text or a tagline") ) description = graphene.String(description=_("Description of the feature")) details = graphene.Field( FeatureDetails, description=_("Detailed information a feature might have") ) url = graphene.String(description=_("URL for more information about this feature")) modified_at = graphene.DateTime(required=True) parents = graphene.List( "features.schema.Feature", required=True, description=_("Parents of this feature"), ) children = graphene.List( "features.schema.Feature", required=True, description=_( "Children of this feature (ex. stops along a route, piers of a harbor etc.)" ), ) def resolve_source(self: models.Feature, info, **kwargs): return { "system": self.source_type.system, "type": self.source_type.type, "id": self.source_id, } def resolve_name(self: models.Feature, info, **kwargs): name_override = self.overrides.filter(field=OverrideFieldType.NAME).first() if name_override: return name_override.value return self.name def resolve_modified_at(self: models.Feature, info, **kwargs): latest_override = self.overrides.order_by("-modified_at").first() return ( max(self.source_modified_at, latest_override.modified_at) if latest_override else self.source_modified_at ) def resolve_details(self: models.Feature, info, **kwargs): details = {} for detail in self.details.all(): # Default dict resolver will resolve this for FeatureDetails details[detail.type.lower()] = detail # PriceTags have a relation to Feature model, so we resolve it separately details["price_list"] = self.price_tags.all() return details if details else None def resolve_parents(self: models.Feature, info, **kwargs): return self.parents.all() def resolve_children(self: models.Feature, info, **kwargs): return self.children.all() @classmethod def get_queryset(cls, queryset, info): return ( queryset.filter(visibility=Visibility.VISIBLE) .select_related("source_type", "category", "teaser") .prefetch_related( "category__translations", "contact_info", "children", "details", "price_tags", "price_tags__translations", "images", "images__license", "images__license__translations", "links", "opening_hours_periods", "opening_hours_periods__opening_hours", "opening_hours_periods__translations", "parents", "tags", "tags__translations", "teaser__translations", "translations", ) ) class FeatureTranslationsInput(graphene.InputObjectType): language_code = LanguageEnum(required=True) name = graphene.String(required=True, description=_("Name of the feature")) description = graphene.String(description=_("Description of the feature")) url = graphene.String(description=_("URL for more information about this feature")) one_liner = graphene.String(description=_("Short introductory text or a tagline")) class ContactInfoInput(graphene.InputObjectType): street_address = graphene.String() postal_code = graphene.String() municipality = graphene.String() phone_number = graphene.String() email = graphene.String() class CreateFeatureMutation(relay.ClientIDMutation): class Input: translations = graphene.List( graphene.NonNull(FeatureTranslationsInput), required=True ) geometry = graphql_geojson.Geometry(required=True) contact_info = ContactInfoInput() category_id = graphene.String() tag_ids = graphene.List(graphene.String) 
feature = graphene.Field(Feature) @classmethod def get_source_type(cls): st, created = models.SourceType.objects.get_or_create(system="ahti", type="api") return st @classmethod @transaction.atomic def mutate_and_get_payload(cls, root, info, **kwargs): contact_info_values = kwargs.pop("contact_info", None) tag_ids = kwargs.pop("tag_ids", None) category_id = kwargs.pop("category_id", None) now = timezone.now() values = { "source_type": cls.get_source_type(), "source_id": uuid.uuid4(), "source_modified_at": now, "mapped_at": now, "visibility": Visibility.DRAFT, } values.update(kwargs) if category_id: values["category"] = Category.objects.get(id=category_id) if tag_ids: tags = [models.Tag.objects.get(id=tag_id) for tag_id in tag_ids] else: tags = [] feature = models.Feature.objects.create_translatable_object(**values) if contact_info_values: ci = models.ContactInfo.objects.create( feature=feature, **contact_info_values ) ci.full_clean() ci.save() if tags: feature.tags.set(tags) return CreateFeatureMutation(feature=feature) class Query(graphene.ObjectType): features = DjangoFilterConnectionField( Feature, description=_("Retrieve all features matching the given filters") ) feature = graphene.Field( Feature, id=ID(description=_("The ID of the object")), ahti_id=String(description=_("Ahti ID of the object")), description=_("Retrieve a single feature"), ) tags = graphene.List(Tag, description=_("Retrieve all tags")) def resolve_feature(self, info, id=None, ahti_id=None, **kwargs): if id: retu
rn relay.Node.get_node_from_global_id(info, id, only_type=Feature)
conditional_block
config.go
levant(n.IPAM) { return nil, "", NewInvalidPluginError(n.IPAM.Type) } args := types.IPAMEnvArgs{} if err := cnitypes.LoadArgs(envArgs, &args); err != nil { return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err) } n.IPAM.PodName = string(args.K8S_POD_NAME) n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE) flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...) if err != nil { return nil, "", err } // Now let's try to merge the configurations... // NB: Don't try to do any initialization before this point or it won't account for merged flat file. var OverlappingRanges bool = n.IPAM.OverlappingRanges if err := mergo.Merge(&n, flatipam); err != nil { logging.Errorf("Merge error with flat file: %s", err) } n.IPAM.OverlappingRanges = OverlappingRanges // Logging if n.IPAM.LogFile != "" { logging.SetLogFile(n.IPAM.LogFile) } if n.IPAM.LogLevel != "" { logging.SetLogLevel(n.IPAM.LogLevel) } if foundflatfile != "" { logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile) } if n.IPAM.Range != "" { oldRange := types.RangeConfiguration{ OmitRanges: n.IPAM.OmitRanges, Range: n.IPAM.Range, RangeStart: n.IPAM.RangeStart, RangeEnd: n.IPAM.RangeEnd, } n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...) } for idx := range n.IPAM.IPRanges { if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2
else { firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range) if err != nil { return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err) } n.IPAM.IPRanges[idx].Range = ipNet.String() if n.IPAM.IPRanges[idx].RangeStart == nil { firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not net then pick the first network address n.IPAM.IPRanges[idx].RangeStart = firstip } } } n.IPAM.OmitRanges = nil n.IPAM.Range = "" n.IPAM.RangeStart = nil n.IPAM.RangeEnd = nil if n.IPAM.Kubernetes.KubeConfigPath == "" { return nil, "", storageError() } if n.IPAM.GatewayStr != "" { gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr) if gwip == nil { return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr) } n.IPAM.Gateway = gwip } for i := range n.IPAM.OmitRanges { _, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err) } } if err := configureStatic(&n, args); err != nil { return nil, "", err } if n.IPAM.LeaderLeaseDuration == 0 { n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration } if n.IPAM.LeaderRenewDeadline == 0 { n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline } if n.IPAM.LeaderRetryPeriod == 0 { n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod } // Copy net name into IPAM so not to drag Net struct around n.IPAM.Name = n.Name return n.IPAM, n.CNIVersion, nil } func pathExists(path string) bool { _, err := os.Stat(path) if err == nil { return true } if os.IsNotExist(err) { return false } return true } func configureStatic(n *types.Net, args types.IPAMEnvArgs) error { // Validate all ranges numV4 := 0 numV6 := 0 for i := range n.IPAM.Addresses { ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr) if err != nil { return fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err) } n.IPAM.Addresses[i].Address = *addr n.IPAM.Addresses[i].Address.IP = ip if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil { return fmt.Errorf("invalid address %d: %s", i, err) } if n.IPAM.Addresses[i].Address.IP.To4() != nil { n.IPAM.Addresses[i].Version = "4" numV4++ } else { n.IPAM.Addresses[i].Version = "6" numV6++ } } newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args) if err != nil { return err } numV4 = newnumV4 numV6 = newnumV6 // CNI spec 0.2.0 and below supported only one v4 and v6 address if numV4 > 1 || numV6 > 1 { for _, v := range types020.SupportedVersions { if n.CNIVersion == v { return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion) } } } return nil } func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) { // Once we have our basics, let's look for our (optional) configuration file confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"} confdirs = append(confdirs, extraConfigPaths...) // We prefix the optional configuration path (so we look there first) if !isControlLoop && IPAM != nil { if IPAM.ConfigurationPath != "" { confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...) 
} } // Cycle through the path and parse the JSON config flatipam := types.Net{} foundflatfile := "" for _, confpath := range confdirs { if pathExists(confpath) { jsonFile, err := os.Open(confpath) if err != nil { return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err) } defer jsonFile.Close() jsonBytes, err := io.ReadAll(jsonFile) if err != nil { return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - io.ReadAll error: %s", confpath, err) } if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil { return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s", confpath, err, jsonBytes) } foundflatfile = confpath return flatipam, foundflatfile, nil } } return flatipam, foundflatfile, NewConfigFileNotFoundError() } func handleEnvArgs(n *types.Net, numV6 int, numV4 int, args types.IPAMEnvArgs) (int, int,
{ firstip := netutils.ParseIPSloppy(r[0]) if firstip == nil { return nil, "", fmt.Errorf("invalid range start IP: %s", r[0]) } lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) '%s': %s", r[1], err) } if !ipNet.Contains(firstip) { return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip) } n.IPAM.IPRanges[idx].Range = ipNet.String() n.IPAM.IPRanges[idx].RangeStart = firstip n.IPAM.IPRanges[idx].RangeEnd = lastip }
conditional_block
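The config.go record above accepts an IP range either as a "start-CIDR" pair, in which case the start address must fall inside the CIDR, or as a bare CIDR, in which case the network address becomes the range start. A standalone Go sketch of the same two-form parsing, using the standard library net package rather than the k8s.io netutils helpers used in the record; names are mine:

package main

import (
	"fmt"
	"net"
	"strings"
)

// parseRange handles both "startIP-CIDR" pairs and plain CIDR strings.
func parseRange(s string) (start net.IP, cidr *net.IPNet, err error) {
	if parts := strings.SplitN(s, "-", 2); len(parts) == 2 {
		start = net.ParseIP(parts[0])
		if start == nil {
			return nil, nil, fmt.Errorf("invalid range start IP: %s", parts[0])
		}
		_, cidr, err = net.ParseCIDR(parts[1])
		if err != nil {
			return nil, nil, fmt.Errorf("invalid CIDR %q: %v", parts[1], err)
		}
		if !cidr.Contains(start) {
			return nil, nil, fmt.Errorf("range start %s not inside %s", start, cidr)
		}
		return start, cidr, nil
	}
	ip, cidr, err := net.ParseCIDR(s)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid CIDR %q: %v", s, err)
	}
	// With no explicit start, fall back to the first (network) address of the CIDR.
	return ip.Mask(cidr.Mask), cidr, nil
}

func main() {
	start, cidr, err := parseRange("192.168.2.200-192.168.0.0/16")
	fmt.Println(start, cidr, err)
}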
config.go
// as `bytes`. At the moment values provided in envArgs are ignored so there // is no possibility to overload the json configuration using envArgs func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, string, error) { var n types.Net if err := json.Unmarshal(bytes, &n); err != nil { return nil, "", fmt.Errorf("LoadIPAMConfig - JSON Parsing Error: %s / bytes: %s", err, bytes) } if n.IPAM == nil { return nil, "", fmt.Errorf("IPAM config missing 'ipam' key") } else if !isNetworkRelevant(n.IPAM) { return nil, "", NewInvalidPluginError(n.IPAM.Type) } args := types.IPAMEnvArgs{} if err := cnitypes.LoadArgs(envArgs, &args); err != nil { return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err) } n.IPAM.PodName = string(args.K8S_POD_NAME) n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE) flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...) if err != nil { return nil, "", err } // Now let's try to merge the configurations... // NB: Don't try to do any initialization before this point or it won't account for merged flat file. var OverlappingRanges bool = n.IPAM.OverlappingRanges if err := mergo.Merge(&n, flatipam); err != nil { logging.Errorf("Merge error with flat file: %s", err) } n.IPAM.OverlappingRanges = OverlappingRanges // Logging if n.IPAM.LogFile != "" { logging.SetLogFile(n.IPAM.LogFile) } if n.IPAM.LogLevel != "" { logging.SetLogLevel(n.IPAM.LogLevel) } if foundflatfile != "" { logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile) } if n.IPAM.Range != "" { oldRange := types.RangeConfiguration{ OmitRanges: n.IPAM.OmitRanges, Range: n.IPAM.Range, RangeStart: n.IPAM.RangeStart, RangeEnd: n.IPAM.RangeEnd, } n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...) } for idx := range n.IPAM.IPRanges { if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 { firstip := netutils.ParseIPSloppy(r[0]) if firstip == nil { return nil, "", fmt.Errorf("invalid range start IP: %s", r[0]) } lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) 
'%s': %s", r[1], err) } if !ipNet.Contains(firstip) { return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip) } n.IPAM.IPRanges[idx].Range = ipNet.String() n.IPAM.IPRanges[idx].RangeStart = firstip n.IPAM.IPRanges[idx].RangeEnd = lastip } else { firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range) if err != nil { return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err) } n.IPAM.IPRanges[idx].Range = ipNet.String() if n.IPAM.IPRanges[idx].RangeStart == nil { firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not net then pick the first network address n.IPAM.IPRanges[idx].RangeStart = firstip } } } n.IPAM.OmitRanges = nil n.IPAM.Range = "" n.IPAM.RangeStart = nil n.IPAM.RangeEnd = nil if n.IPAM.Kubernetes.KubeConfigPath == "" { return nil, "", storageError() } if n.IPAM.GatewayStr != "" { gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr) if gwip == nil { return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr) } n.IPAM.Gateway = gwip } for i := range n.IPAM.OmitRanges { _, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err) } } if err := configureStatic(&n, args); err != nil { return nil, "", err } if n.IPAM.LeaderLeaseDuration == 0 { n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration } if n.IPAM.LeaderRenewDeadline == 0 { n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline } if n.IPAM.LeaderRetryPeriod == 0 { n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod } // Copy net name into IPAM so not to drag Net struct around n.IPAM.Name = n.Name return n.IPAM, n.CNIVersion, nil } func pathExists(path string) bool { _, err := os.Stat(path) if err == nil { return true } if os.IsNotExist(err) { return false } return true } func configureStatic(n *types.Net, args types.IPAMEnvArgs) error { // Validate all ranges numV4 := 0 numV6 := 0 for i := range n.IPAM.Addresses { ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr) if err != nil { return fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err) } n.IPAM.Addresses[i].Address = *addr n.IPAM.Addresses[i].Address.IP = ip if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil { return fmt.Errorf("invalid address %d: %s", i, err) } if n.IPAM.Addresses[i].Address.IP.To4() != nil { n.IPAM.Addresses[i].Version = "4" numV4++ } else { n.IPAM.Addresses[i].Version = "6" numV6++ } } newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args) if err != nil { return err } numV4 = newnumV4 numV6 = newnumV6 // CNI spec 0.2.0 and below supported only one v4 and v6 address if numV4 > 1 || numV6 > 1 { for _, v := range types020.SupportedVersions { if n.CNIVersion == v { return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion) } } } return nil } func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) { // Once we have our basics, let's look for our (optional) configuration file confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"} confdirs = append(confdirs, extraConfigPaths...) 
// We prefix the optional configuration path (so we look there first) if !isControlLoop && IPAM != nil { if IPAM.ConfigurationPath != "" { confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...) } } // Cycle through the path and parse the JSON config flatipam := types.Net{} foundflatfile := "" for _, confpath := range confdirs { if pathExists(confpath) { jsonFile, err := os.Open(confpath) if err != nil { return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err) } defer jsonFile.Close() jsonBytes, err := io.ReadAll(jsonFile) if err !=
} return fmt.Errorf("IP %s not v4 nor v6", *ip) } // LoadIPAMConfig creates IPAMConfig using json encoded configuration provided
random_line_split
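A standalone sketch of the range normalization performed in LoadIPAMConfig above. It uses the standard library net package rather than the k8s.io/utils/net helpers the plugin imports, and the function name normalizeRange plus the example addresses are ours, not part of Whereabouts.

// Sketch only: stdlib net instead of k8s.io/utils/net; addresses are invented.
package main

import (
	"fmt"
	"net"
	"strings"
)

// normalizeRange mirrors the "start-IP dash CIDR" branch of LoadIPAMConfig:
// a plain CIDR keeps its network address as the range start, while the
// "start-last/mask" form is validated and normalized to the canonical CIDR.
func normalizeRange(r string) (string, net.IP, net.IP, error) {
	if parts := strings.SplitN(r, "-", 2); len(parts) == 2 {
		start := net.ParseIP(parts[0])
		if start == nil {
			return "", nil, nil, fmt.Errorf("invalid range start IP: %s", parts[0])
		}
		last, ipNet, err := net.ParseCIDR(parts[1])
		if err != nil {
			return "", nil, nil, fmt.Errorf("invalid CIDR %q: %v", parts[1], err)
		}
		if !ipNet.Contains(start) {
			return "", nil, nil, fmt.Errorf("range start %s outside CIDR %s", start, ipNet)
		}
		return ipNet.String(), start, last, nil
	}
	ip, ipNet, err := net.ParseCIDR(r)
	if err != nil {
		return "", nil, nil, fmt.Errorf("invalid CIDR %q: %v", r, err)
	}
	// No explicit start: default to the first address of the network.
	return ipNet.String(), ip.Mask(ipNet.Mask), nil, nil
}

func main() {
	cidr, start, last, err := normalizeRange("192.168.2.200-192.168.2.250/24")
	fmt.Println(cidr, start, last, err) // 192.168.2.0/24 192.168.2.200 192.168.2.250 <nil>
}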
config.go
(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, string, error) { var n types.Net if err := json.Unmarshal(bytes, &n); err != nil { return nil, "", fmt.Errorf("LoadIPAMConfig - JSON Parsing Error: %s / bytes: %s", err, bytes) } if n.IPAM == nil { return nil, "", fmt.Errorf("IPAM config missing 'ipam' key") } else if !isNetworkRelevant(n.IPAM) { return nil, "", NewInvalidPluginError(n.IPAM.Type) } args := types.IPAMEnvArgs{} if err := cnitypes.LoadArgs(envArgs, &args); err != nil { return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err) } n.IPAM.PodName = string(args.K8S_POD_NAME) n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE) flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...) if err != nil { return nil, "", err } // Now let's try to merge the configurations... // NB: Don't try to do any initialization before this point or it won't account for merged flat file. var OverlappingRanges bool = n.IPAM.OverlappingRanges if err := mergo.Merge(&n, flatipam); err != nil { logging.Errorf("Merge error with flat file: %s", err) } n.IPAM.OverlappingRanges = OverlappingRanges // Logging if n.IPAM.LogFile != "" { logging.SetLogFile(n.IPAM.LogFile) } if n.IPAM.LogLevel != "" { logging.SetLogLevel(n.IPAM.LogLevel) } if foundflatfile != "" { logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile) } if n.IPAM.Range != "" { oldRange := types.RangeConfiguration{ OmitRanges: n.IPAM.OmitRanges, Range: n.IPAM.Range, RangeStart: n.IPAM.RangeStart, RangeEnd: n.IPAM.RangeEnd, } n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...) } for idx := range n.IPAM.IPRanges { if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 { firstip := netutils.ParseIPSloppy(r[0]) if firstip == nil { return nil, "", fmt.Errorf("invalid range start IP: %s", r[0]) } lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) 
'%s': %s", r[1], err) } if !ipNet.Contains(firstip) { return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip) } n.IPAM.IPRanges[idx].Range = ipNet.String() n.IPAM.IPRanges[idx].RangeStart = firstip n.IPAM.IPRanges[idx].RangeEnd = lastip } else { firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range) if err != nil { return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err) } n.IPAM.IPRanges[idx].Range = ipNet.String() if n.IPAM.IPRanges[idx].RangeStart == nil { firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not net then pick the first network address n.IPAM.IPRanges[idx].RangeStart = firstip } } } n.IPAM.OmitRanges = nil n.IPAM.Range = "" n.IPAM.RangeStart = nil n.IPAM.RangeEnd = nil if n.IPAM.Kubernetes.KubeConfigPath == "" { return nil, "", storageError() } if n.IPAM.GatewayStr != "" { gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr) if gwip == nil { return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr) } n.IPAM.Gateway = gwip } for i := range n.IPAM.OmitRanges { _, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err) } } if err := configureStatic(&n, args); err != nil { return nil, "", err } if n.IPAM.LeaderLeaseDuration == 0 { n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration } if n.IPAM.LeaderRenewDeadline == 0 { n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline } if n.IPAM.LeaderRetryPeriod == 0 { n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod } // Copy net name into IPAM so not to drag Net struct around n.IPAM.Name = n.Name return n.IPAM, n.CNIVersion, nil } func pathExists(path string) bool { _, err := os.Stat(path) if err == nil { return true } if os.IsNotExist(err) { return false } return true } func configureStatic(n *types.Net, args types.IPAMEnvArgs) error { // Validate all ranges numV4 := 0 numV6 := 0 for i := range n.IPAM.Addresses { ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr) if err != nil { return fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err) } n.IPAM.Addresses[i].Address = *addr n.IPAM.Addresses[i].Address.IP = ip if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil { return fmt.Errorf("invalid address %d: %s", i, err) } if n.IPAM.Addresses[i].Address.IP.To4() != nil { n.IPAM.Addresses[i].Version = "4" numV4++ } else { n.IPAM.Addresses[i].Version = "6" numV6++ } } newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args) if err != nil { return err } numV4 = newnumV4 numV6 = newnumV6 // CNI spec 0.2.0 and below supported only one v4 and v6 address if numV4 > 1 || numV6 > 1 { for _, v := range types020.SupportedVersions { if n.CNIVersion == v { return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion) } } } return nil } func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) { // Once we have our basics, let's look for our (optional) configuration file confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"} confdirs = append(confdirs, extraConfigPaths...) 
// We prefix the optional configuration path (so we look there first) if !isControlLoop && IPAM != nil { if IPAM.ConfigurationPath != "" { confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...) } } // Cycle through the path and parse the JSON config flatipam := types.Net{} foundflatfile := "" for _, confpath := range confdirs { if pathExists(confpath) { jsonFile, err := os.Open(confpath) if err != nil { return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err) } defer jsonFile.Close() jsonBytes, err := io.ReadAll(jsonFile) if err != nil { return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - io.ReadAll error: %s", confpath, err) } if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil { return flatipam,
LoadIPAMConfig
identifier_name
config.go
flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...) if err != nil { return nil, "", err } // Now let's try to merge the configurations... // NB: Don't try to do any initialization before this point or it won't account for merged flat file. var OverlappingRanges bool = n.IPAM.OverlappingRanges if err := mergo.Merge(&n, flatipam); err != nil { logging.Errorf("Merge error with flat file: %s", err) } n.IPAM.OverlappingRanges = OverlappingRanges // Logging if n.IPAM.LogFile != "" { logging.SetLogFile(n.IPAM.LogFile) } if n.IPAM.LogLevel != "" { logging.SetLogLevel(n.IPAM.LogLevel) } if foundflatfile != "" { logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile) } if n.IPAM.Range != "" { oldRange := types.RangeConfiguration{ OmitRanges: n.IPAM.OmitRanges, Range: n.IPAM.Range, RangeStart: n.IPAM.RangeStart, RangeEnd: n.IPAM.RangeEnd, } n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...) } for idx := range n.IPAM.IPRanges { if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 { firstip := netutils.ParseIPSloppy(r[0]) if firstip == nil { return nil, "", fmt.Errorf("invalid range start IP: %s", r[0]) } lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) '%s': %s", r[1], err) } if !ipNet.Contains(firstip) { return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip) } n.IPAM.IPRanges[idx].Range = ipNet.String() n.IPAM.IPRanges[idx].RangeStart = firstip n.IPAM.IPRanges[idx].RangeEnd = lastip } else { firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range) if err != nil { return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err) } n.IPAM.IPRanges[idx].Range = ipNet.String() if n.IPAM.IPRanges[idx].RangeStart == nil { firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not net then pick the first network address n.IPAM.IPRanges[idx].RangeStart = firstip } } } n.IPAM.OmitRanges = nil n.IPAM.Range = "" n.IPAM.RangeStart = nil n.IPAM.RangeEnd = nil if n.IPAM.Kubernetes.KubeConfigPath == "" { return nil, "", storageError() } if n.IPAM.GatewayStr != "" { gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr) if gwip == nil { return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr) } n.IPAM.Gateway = gwip } for i := range n.IPAM.OmitRanges { _, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i]) if err != nil { return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err) } } if err := configureStatic(&n, args); err != nil { return nil, "", err } if n.IPAM.LeaderLeaseDuration == 0 { n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration } if n.IPAM.LeaderRenewDeadline == 0 { n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline } if n.IPAM.LeaderRetryPeriod == 0 { n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod } // Copy net name into IPAM so not to drag Net struct around n.IPAM.Name = n.Name return n.IPAM, n.CNIVersion, nil } func pathExists(path string) bool { _, err := os.Stat(path) if err == nil { return true } if os.IsNotExist(err) { return false } return true } func configureStatic(n *types.Net, args types.IPAMEnvArgs) error { // Validate all ranges numV4 := 0 numV6 := 0 for i := range n.IPAM.Addresses { ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr) if err != nil { return 
fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err) } n.IPAM.Addresses[i].Address = *addr n.IPAM.Addresses[i].Address.IP = ip if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil { return fmt.Errorf("invalid address %d: %s", i, err) } if n.IPAM.Addresses[i].Address.IP.To4() != nil { n.IPAM.Addresses[i].Version = "4" numV4++ } else { n.IPAM.Addresses[i].Version = "6" numV6++ } } newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args) if err != nil { return err } numV4 = newnumV4 numV6 = newnumV6 // CNI spec 0.2.0 and below supported only one v4 and v6 address if numV4 > 1 || numV6 > 1 { for _, v := range types020.SupportedVersions { if n.CNIVersion == v { return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion) } } } return nil } func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) { // Once we have our basics, let's look for our (optional) configuration file confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"} confdirs = append(confdirs, extraConfigPaths...) // We prefix the optional configuration path (so we look there first) if !isControlLoop && IPAM != nil { if IPAM.ConfigurationPath != "" { confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...) } } // Cycle through the path and parse the JSON config flatipam := types.Net{} foundflatfile := "" for _, confpath := range confdirs { if pathExists(confpath) { jsonFile, err := os.Open(confpath) if err != nil { return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err) } defer jsonFile.Close() jsonBytes, err := io.ReadAll(jsonFile) if err != nil { return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - io.ReadAll error: %s", confpath, err) } if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil { return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s
{ var n types.Net if err := json.Unmarshal(bytes, &n); err != nil { return nil, "", fmt.Errorf("LoadIPAMConfig - JSON Parsing Error: %s / bytes: %s", err, bytes) } if n.IPAM == nil { return nil, "", fmt.Errorf("IPAM config missing 'ipam' key") } else if !isNetworkRelevant(n.IPAM) { return nil, "", NewInvalidPluginError(n.IPAM.Type) } args := types.IPAMEnvArgs{} if err := cnitypes.LoadArgs(envArgs, &args); err != nil { return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err) } n.IPAM.PodName = string(args.K8S_POD_NAME) n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE)
identifier_body
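The merge step above snapshots OverlappingRanges before mergo.Merge and restores it afterwards so the flat-file defaults cannot change it. Below is a minimal sketch of that save-merge-restore pattern; applyDefaults is a hand-rolled stand-in for mergo.Merge and the tiny IPAMConfig struct is illustrative, not the plugin's real type.

// Sketch only: applyDefaults stands in for mergo.Merge; IPAMConfig is a toy struct.
package main

import "fmt"

type IPAMConfig struct {
	LogLevel          string
	OverlappingRanges bool
}

// applyDefaults copies missing fields from the flat-file defaults into cfg;
// like a blanket merge, it would also overwrite OverlappingRanges.
func applyDefaults(cfg, def *IPAMConfig) {
	if cfg.LogLevel == "" {
		cfg.LogLevel = def.LogLevel
	}
	cfg.OverlappingRanges = def.OverlappingRanges
}

func main() {
	cfg := IPAMConfig{OverlappingRanges: false}
	flat := IPAMConfig{LogLevel: "debug", OverlappingRanges: true}

	saved := cfg.OverlappingRanges // snapshot before merging, as LoadIPAMConfig does
	applyDefaults(&cfg, &flat)
	cfg.OverlappingRanges = saved // restore the per-network setting afterwards

	fmt.Println(cfg.LogLevel, cfg.OverlappingRanges) // debug false
}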
ontology.js
function displayGeography(){ document.getElementById("ontology_lookup").style.display='none'; document.getElementById("geographic_location").style.display=''; document.getElementById("map_canvas").style.visibility='visible'; } /* Initializes the Google Map. */ function initialize(){ geocoder = new google.maps.Geocoder(); var latlng = new google.maps.LatLng(0,0); var myOptions = { zoom: 1, center: latlng, mapTypeId: google.maps.MapTypeId.ROADMAP } map = new google.maps.Map(document.getElementById("map_canvas"), myOptions); } /* Removes the overlays from the map, but keeps them in the array* //not used, since we are re-initializing the map function clearOverlays() { if (markersArray) { for (i in markersArray) { markersArray[i].setMap(null) //infoWindowArray[i].close() } markersArray.length = 0; //infoWindowArray.length = 0; //markersArray=new Array(); } } */ var elevator = new google.maps.ElevationService(); /* This function preps the addresses and calls the geocoder */ function codeAddress() { //we reinitialize each time this is called, so it recenters on the world //I did this since it is difficult to zoom based on the lat/lngs initialize(); //get the locations from the input box var address = document.getElementById("address").value; //convert the input box into an array address_array=convert_terms_to_array(address) //iterate over the addresses and append the "loc:" tag to the beginning //which overwrites google point of interest detector saved_address_array=new Array(); for (var i=0; i<address_array.length-1; i++){ if (address_array[i] != ''){ address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '') saved_address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '') }else if (address_array[i] == '' && address_array[i-1]!=''){ address_array[i]=address_array[i-1] saved_address_array[i]=address_array[i-1] }else{ saved_address_array[i]=address_array[i] } } //get a unique list of the address unique_addresses=unique(address_array) //no longer needed since we are re-initializing //clearOverlays(); latitude=new Array(); longitude=new Array(); elevation=new Array(); var latlong var iterator=0; timer_ms=0; if (geocoder) { //give status updates document.getElementById("loading_status").innerHTML='Loading coordinates' //iterate over the addresses and append a timing event, since google //has a query limit per second for (var i=0; i<unique_addresses.length; i++){ if (unique_addresses[i]!=''){ var lat2=setTimeout('geocode_results('+i+')',timer_ms) timer_ms+=700 } } //append to the status after all points should have loaded setTimeout("document.getElementById('loading_status').innerHTML='Completed'",timer_ms) } } /* This function gets the Lat/Long using Google Maps Geocoder API. */ function geocode_results(i){ //query google maps for lat/lngs geocoder.geocode( { 'address': unique_addresses[i]}, function(results, status) { if (status == google.maps.GeocoderStatus.OK) { var latlong = new google.maps.LatLng(results[0].geometry.location.lat(),results[0].geometry.location.lng()); var pinImage = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_letter&chld= |" + 'FF0000', new google.maps.Size(21, 34), new google.maps.Point(0,0), new google.maps.Point(10, 34)); var pinShadow = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_shadow", new google.maps.Size(40, 37), new google.maps.Point(0, 0), new google.maps.Point(12, 35)); /* This function gets the Elevation using Google Maps Elevations API. 
*/ elevator.getElevationForLocations({'locations':[latlong]}, function(results2, status2){ if (status == google.maps.ElevationStatus.OK) { // Retrieve the first result if (results2[0]) { //assign lat/lng/elev to arrays latitude[unique_addresses[i]]=results2[0].location.lat() longitude[unique_addresses[i]]=results2[0].location.lng() elevation[unique_addresses[i]]=results2[0].elevation; //put a pointer on the map markersArray[unique_addresses[i]] = new google.maps.Marker({ position: latlong, map: map, color: '#FF0000', clickable: false, icon: pinImage, shadow: pinShadow }); } else { alert("No elevation results found!"); } } }); }else{ alert(status) alert("Unable to find the Location you specified!"); } }) } /* This function outputs the Lat/Long/Elev to the Console. */ function output_latlong(){ //generate the output content type=document.getElementById('latlngType').value var content=''; for (var i=0; i<saved_address_array.length; i++) { if (type=='Latitude'){ content=content+latitude[saved_address_array[i]]+'<br>'; }else if (type=='Longitude'){ content=content+longitude[saved_address_array[i]]+'<br>'; }else if (type=='Elevation'){ content=content+elevation[saved_address_array[i]]+'<br>'; } } //write page top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1') top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>') top.consoleRef.document.close() } /* This is the AJAX function which produces the list of terms below each input box. It takes as input: 1) the ontology select box id 2) the query string 3) the input box id 4) the txt box below input id */ function showResult(ont_id,str,inputbox_id,txt_id) { // If the substring length is empty, then do nothing if (str.length==0) { return; } // If the substring is at least one in length, then search for similar terms // in the ontologies selected. This is where we can set the length to start //searches (i.e. after 3 letters are present. else if (str.length>0){ //remove text or checkmark next to the input box and change font color //to black document.getElementById('valid'+inputbox_id).innerHTML=""; document.getElementById('valid'+inputbox_id).style.color="black"; xmlhttp=GetXmlHttpObject() //check if browser can perform xmlhttp if (xmlhttp==null){ alert ("Your browser does not support XML HTTP Request"); return; } //get the list of ontologies using the ontology id ont_list=document.getElementById(ont_id) //get only the selected ontologies and convert to PL/SQL formatted text selected_ont=get_selected(ont_list) //generate a url string where we pass our variables var url="ontology_search.psp"; url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id; url=url+"&sid="+Math.random(); xmlhttp.onreadystatechange=function() { if (xmlhttp.readyState==4){ //write the list of similar terms from the database document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText; document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2"; xmlhttp.close(); } } //perform a GET xmlhttp.open("GET",url,true); xmlhttp.send(null) } } /* This is the AJAX function which validates the terms in each input box. 
It takes as input: 1) the ontology select box id 2) the initial list of ontology terms from user 3) the table where all results should be written 4) whether this is the first call of this function 5) whether this is an export call */ function validateInput(ont_id,ont_term_list,table_id,new_data,export_data) { //if no data is input produce an alert if (ont_term_list.length==0){ alert("Paste some data in the input box!"); return;
{ document.getElementById("ontology_lookup").style.display=''; document.getElementById("geographic_location").style.display='none'; document.getElementById("map_canvas").style.visibility='hidden'; }
identifier_body
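codeAddress above staggers geocoder calls with setTimeout at 700 ms intervals to stay under the geocoding API's per-second query limit. A rough sketch of the same throttling idea follows; lookup is a placeholder for the real geocode/elevation request and the 700 ms figure is simply copied from the JS.

// Sketch only: lookup is a placeholder; the 700 ms spacing is copied from the JS.
package main

import (
	"fmt"
	"time"
)

func lookup(addr string) {
	fmt.Println("geocoding", addr) // stand-in for the real geocode/elevation request
}

func main() {
	addresses := []string{"loc:Denver", "loc:Boulder", "loc:Aspen"}
	tick := time.NewTicker(700 * time.Millisecond)
	defer tick.Stop()
	for _, a := range addresses {
		lookup(a)
		<-tick.C // wait out the rate-limit window before the next request
	}
}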
ontology.js
loading_status').innerHTML='Completed'",timer_ms) } } /* This function gets the Lat/Long using Google Maps Geocoder API. */ function geocode_results(i){ //query google maps for lat/lngs geocoder.geocode( { 'address': unique_addresses[i]}, function(results, status) { if (status == google.maps.GeocoderStatus.OK) { var latlong = new google.maps.LatLng(results[0].geometry.location.lat(),results[0].geometry.location.lng()); var pinImage = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_letter&chld= |" + 'FF0000', new google.maps.Size(21, 34), new google.maps.Point(0,0), new google.maps.Point(10, 34)); var pinShadow = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_shadow", new google.maps.Size(40, 37), new google.maps.Point(0, 0), new google.maps.Point(12, 35)); /* This function gets the Elevation using Google Maps Elevations API. */ elevator.getElevationForLocations({'locations':[latlong]}, function(results2, status2){ if (status == google.maps.ElevationStatus.OK) { // Retrieve the first result if (results2[0]) { //assign lat/lng/elev to arrays latitude[unique_addresses[i]]=results2[0].location.lat() longitude[unique_addresses[i]]=results2[0].location.lng() elevation[unique_addresses[i]]=results2[0].elevation; //put a pointer on the map markersArray[unique_addresses[i]] = new google.maps.Marker({ position: latlong, map: map, color: '#FF0000', clickable: false, icon: pinImage, shadow: pinShadow }); } else { alert("No elevation results found!"); } } }); }else{ alert(status) alert("Unable to find the Location you specified!"); } }) } /* This function outputs the Lat/Long/Elev to the Console. */ function
(){ //generate the output content type=document.getElementById('latlngType').value var content=''; for (var i=0; i<saved_address_array.length; i++) { if (type=='Latitude'){ content=content+latitude[saved_address_array[i]]+'<br>'; }else if (type=='Longitude'){ content=content+longitude[saved_address_array[i]]+'<br>'; }else if (type=='Elevation'){ content=content+elevation[saved_address_array[i]]+'<br>'; } } //write page top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1') top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>') top.consoleRef.document.close() } /* This is the AJAX function which produces the list of terms below each input box. It takes as input: 1) the ontology select box id 2) the query string 3) the input box id 4) the txt box below input id */ function showResult(ont_id,str,inputbox_id,txt_id) { // If the substring length is empty, then do nothing if (str.length==0) { return; } // If the substring is at least one in length, then search for similar terms // in the ontologies selected. This is where we can set the length to start //searches (i.e. after 3 letters are present. else if (str.length>0){ //remove text or checkmark next to the input box and change font color //to black document.getElementById('valid'+inputbox_id).innerHTML=""; document.getElementById('valid'+inputbox_id).style.color="black"; xmlhttp=GetXmlHttpObject() //check if browser can perform xmlhttp if (xmlhttp==null){ alert ("Your browser does not support XML HTTP Request"); return; } //get the list of ontologies using the ontology id ont_list=document.getElementById(ont_id) //get only the selected ontologies and convert to PL/SQL formatted text selected_ont=get_selected(ont_list) //generate a url string where we pass our variables var url="ontology_search.psp"; url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id; url=url+"&sid="+Math.random(); xmlhttp.onreadystatechange=function() { if (xmlhttp.readyState==4){ //write the list of similar terms from the database document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText; document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2"; xmlhttp.close(); } } //perform a GET xmlhttp.open("GET",url,true); xmlhttp.send(null) } } /* This is the AJAX function which validates the terms in each input box. 
It takes as input: 1) the ontology select box id 2) the initial list of ontology terms from user 3) the table where all results should be written 4) whether this is the first call of this function 5) whether this is an export call */ function validateInput(ont_id,ont_term_list,table_id,new_data,export_data) { //if no data is input produce an alert if (ont_term_list.length==0){ alert("Paste some data in the input box!"); return; } //get the list of ontologies using the ontology id ontologies=document.getElementById(ont_id) //get only the selected ontologies and convert to PL/SQL formatted text selected_ont=get_selected(ontologies) //if no ontology is selected produce an alert if (selected_ont==''){ alert("Select at least one Ontology!") return; } //take the pasted terms from user and convert those terms to an array ont_term_array=convert_terms_to_array(ont_term_list); //save this original list of terms from the user into an array original_ont_term_array=ont_term_array; //create an array to store the terms from the input boxes as they are being //modified updated_unique_terms=new Array(); //if this is the first call to this function, create a unique list of terms //build the input boxes if (new_data == 'True') { //original_ont_term_array=new Array(); original_unique_terms=new Array(); //remove old input boxes, so the user can re-use the app over and over clear_inputs(table_id) //generate unique list and input boxes unique_ont_array=write_input_boxes(ont_term_array,table_id); //store unique ontology terms for later use original_unique_terms=unique_ont_array; updated_unique_terms=unique_ont_array; } //If this is not the first call, retrieve values from input boxes else { //get the values from the input boxes unique_ont_array=get_inputs(unique_ont_array); updated_unique_terms=unique_ont_array; } //check if browser can perform xmlhttp xmlhttp=GetXmlHttpObject() if (xmlhttp==null){ alert ("Your browser does not support XML HTTP Request"); return; } /* var url="ontology_validate.psp"; url=url+"?ont_id="+selected_ont+"&ont_terms="+unique_ont_array; url=url+"&sid="+Math.random(); xmlhttp.onreadystatechange=function() { if (xmlhttp.readyState==4) { //since the response from the PL/SQL is a string using the "#' //delimitor, so we need to split and write them to the table validity=xmlhttp.responseText.split('#') for (var i=0; i<validity.length;i++){ //determine if an input value is valid and write 'Invalid' or a //checkbox accordingly if (validity[i]=='Valid' || validity[i]=='Valid\n'){ document.getElementById('validtxtbox'+(i)).innerHTML='&#10003;'; document.getElementById('validtxtbox'+(i)).style.color="green"; }else if (validity[i]=='Invalid' || validity[i]=='Invalid\n'){ document.getElementById('validtxtbox'+(i)).innerHTML=validity[i]; document.getElementById('validtxtbox'+(i)).style.color="red"; } } } } //perform a GET xmlhttp.open("GET",url,true); xmlhttp.send(null) */ //If the data is supposed to be exported, write the data to the new window if (export_data=='True'){ write_data_to_new_window(original_ont_term
output_latlong
identifier_name
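showResult above builds the ontology_search.psp URL by string concatenation and appends a random sid value as a cache-buster. A small sketch of the same query construction; the parameter names mirror the JS, the example argument values are invented, and url.Values adds the escaping the concatenation omits.

// Sketch only: parameter names mirror the JS; searchURL and the example values are ours.
package main

import (
	"fmt"
	"math/rand"
	"net/url"
)

func searchURL(ont, query, inputID, txtID string) string {
	v := url.Values{}
	v.Set("ont", ont)
	v.Set("q", query)
	v.Set("inputid", inputID)
	v.Set("txt_id", txtID)
	v.Set("sid", fmt.Sprint(rand.Float64())) // cache-buster, as in the JS
	return "ontology_search.psp?" + v.Encode()
}

func main() {
	fmt.Println(searchURL("ENVO", "soil", "txtbox3", "3"))
}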
ontology.js
_address_array[i]]+'<br>'; }else if (type=='Elevation'){ content=content+elevation[saved_address_array[i]]+'<br>'; } } //write page top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1') top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>') top.consoleRef.document.close() } /* This is the AJAX function which produces the list of terms below each input box. It takes as input: 1) the ontology select box id 2) the query string 3) the input box id 4) the txt box below input id */ function showResult(ont_id,str,inputbox_id,txt_id) { // If the substring length is empty, then do nothing if (str.length==0) { return; } // If the substring is at least one in length, then search for similar terms // in the ontologies selected. This is where we can set the length to start //searches (i.e. after 3 letters are present. else if (str.length>0){ //remove text or checkmark next to the input box and change font color //to black document.getElementById('valid'+inputbox_id).innerHTML=""; document.getElementById('valid'+inputbox_id).style.color="black"; xmlhttp=GetXmlHttpObject() //check if browser can perform xmlhttp if (xmlhttp==null){ alert ("Your browser does not support XML HTTP Request"); return; } //get the list of ontologies using the ontology id ont_list=document.getElementById(ont_id) //get only the selected ontologies and convert to PL/SQL formatted text selected_ont=get_selected(ont_list) //generate a url string where we pass our variables var url="ontology_search.psp"; url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id; url=url+"&sid="+Math.random(); xmlhttp.onreadystatechange=function() { if (xmlhttp.readyState==4){ //write the list of similar terms from the database document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText; document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2"; xmlhttp.close(); } } //perform a GET xmlhttp.open("GET",url,true); xmlhttp.send(null) } } /* This is the AJAX function which validates the terms in each input box. 
It takes as input: 1) the ontology select box id 2) the initial list of ontology terms from user 3) the table where all results should be written 4) whether this is the first call of this function 5) whether this is an export call */ function validateInput(ont_id,ont_term_list,table_id,new_data,export_data) { //if no data is input produce an alert if (ont_term_list.length==0){ alert("Paste some data in the input box!"); return; } //get the list of ontologies using the ontology id ontologies=document.getElementById(ont_id) //get only the selected ontologies and convert to PL/SQL formatted text selected_ont=get_selected(ontologies) //if no ontology is selected produce an alert if (selected_ont==''){ alert("Select at least one Ontology!") return; } //take the pasted terms from user and convert those terms to an array ont_term_array=convert_terms_to_array(ont_term_list); //save this original list of terms from the user into an array original_ont_term_array=ont_term_array; //create an array to store the terms from the input boxes as they are being //modified updated_unique_terms=new Array(); //if this is the first call to this function, create a unique list of terms //build the input boxes if (new_data == 'True') { //original_ont_term_array=new Array(); original_unique_terms=new Array(); //remove old input boxes, so the user can re-use the app over and over clear_inputs(table_id) //generate unique list and input boxes unique_ont_array=write_input_boxes(ont_term_array,table_id); //store unique ontology terms for later use original_unique_terms=unique_ont_array; updated_unique_terms=unique_ont_array; } //If this is not the first call, retrieve values from input boxes else { //get the values from the input boxes unique_ont_array=get_inputs(unique_ont_array); updated_unique_terms=unique_ont_array; } //check if browser can perform xmlhttp xmlhttp=GetXmlHttpObject() if (xmlhttp==null){ alert ("Your browser does not support XML HTTP Request"); return; } /* var url="ontology_validate.psp"; url=url+"?ont_id="+selected_ont+"&ont_terms="+unique_ont_array; url=url+"&sid="+Math.random(); xmlhttp.onreadystatechange=function() { if (xmlhttp.readyState==4) { //since the response from the PL/SQL is a string using the "#' //delimitor, so we need to split and write them to the table validity=xmlhttp.responseText.split('#') for (var i=0; i<validity.length;i++){ //determine if an input value is valid and write 'Invalid' or a //checkbox accordingly if (validity[i]=='Valid' || validity[i]=='Valid\n'){ document.getElementById('validtxtbox'+(i)).innerHTML='&#10003;'; document.getElementById('validtxtbox'+(i)).style.color="green"; }else if (validity[i]=='Invalid' || validity[i]=='Invalid\n'){ document.getElementById('validtxtbox'+(i)).innerHTML=validity[i]; document.getElementById('validtxtbox'+(i)).style.color="red"; } } } } //perform a GET xmlhttp.open("GET",url,true); xmlhttp.send(null) */ //If the data is supposed to be exported, write the data to the new window if (export_data=='True'){ write_data_to_new_window(original_ont_term_array,original_unique_terms,updated_unique_terms); } } function GetXmlHttpObject() { if (window.XMLHttpRequest) { // code for IE7+, Firefox, Chrome, Opera, Safari return new XMLHttpRequest(); } if (window.ActiveXObject) { // code for IE6, IE5 return new ActiveXObject("Microsoft.XMLHTTP"); } return null; } /* This function changes the input box value when the user clicks on a term in the list of terms */ function change_form_value(form_field,form_value,inputbox_id){ //change the input box 
value document.getElementById(form_field).value=form_value; //Clear the list of ontology terms document.getElementById('input'+inputbox_id).innerHTML=''; document.getElementById('input'+inputbox_id).style.border="0px"; //Add a checkmark next to the input box document.getElementById('valid'+form_field).innerHTML='&#10003;'; document.getElementById('valid'+form_field).style.color="green"; } /* when iterating through list of ontology terms, upon onfocus, this changes the background to cyan */ function setStyle(x) { document.getElementById(x).style.background="cyan" } /* when iterating through list of ontology terms, when removing focus (onblur), this changes the background back to white */ function removeStyle(x) { document.getElementById(x).style.background="white" } /* This function checks to see if all input boxes are valid, updates the original list of terms from the user, with the corrected terms, then calls the function to write the data to the new window */ function write_data_to_new_window(original_ont_term_array, original_unique_terms, updated_unique_terms){ //Determine that all terms are valid for (var i=0;i<original_unique_terms.length;i++){ if (original_unique_terms[i]!=''){ validity=document.getElementById('validtxtbox'+(i)).innerHTML if ( validity=='' || validity=='Click Input Box...'){ alert('You need to choose valid terms!'); return; }else if (validity=='Invalid' || validity=='Invalid\n'){ alert('You have invalid terms!'); return; } } } //generate a new array with updated terms based on the valid input boxes output_array=new Array(); //using length-1 since we appended an empty element to the list in the //convert_terms_to_array function. for (var j=0;j<original_ont_term_array.length-1;j++){ for (var k=0;k<original_unique_terms.length;k++){
if (original_ont_term_array[j]==original_unique_terms[k]){
random_line_split
ontology.js
} }); }else{ alert(status) alert("Unable to find the Location you specified!"); } }) } /* This function outputs the Lat/Long/Elev to the Console. */ function output_latlong(){ //generate the output content type=document.getElementById('latlngType').value var content=''; for (var i=0; i<saved_address_array.length; i++) { if (type=='Latitude'){ content=content+latitude[saved_address_array[i]]+'<br>'; }else if (type=='Longitude'){ content=content+longitude[saved_address_array[i]]+'<br>'; }else if (type=='Elevation'){ content=content+elevation[saved_address_array[i]]+'<br>'; } } //write page top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1') top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>') top.consoleRef.document.close() } /* This is the AJAX function which produces the list of terms below each input box. It takes as input: 1) the ontology select box id 2) the query string 3) the input box id 4) the txt box below input id */ function showResult(ont_id,str,inputbox_id,txt_id) { // If the substring length is empty, then do nothing if (str.length==0) { return; } // If the substring is at least one in length, then search for similar terms // in the ontologies selected. This is where we can set the length to start //searches (i.e. after 3 letters are present. else if (str.length>0){ //remove text or checkmark next to the input box and change font color //to black document.getElementById('valid'+inputbox_id).innerHTML=""; document.getElementById('valid'+inputbox_id).style.color="black"; xmlhttp=GetXmlHttpObject() //check if browser can perform xmlhttp if (xmlhttp==null){ alert ("Your browser does not support XML HTTP Request"); return; } //get the list of ontologies using the ontology id ont_list=document.getElementById(ont_id) //get only the selected ontologies and convert to PL/SQL formatted text selected_ont=get_selected(ont_list) //generate a url string where we pass our variables var url="ontology_search.psp"; url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id; url=url+"&sid="+Math.random(); xmlhttp.onreadystatechange=function() { if (xmlhttp.readyState==4){ //write the list of similar terms from the database document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText; document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2"; xmlhttp.close(); } } //perform a GET xmlhttp.open("GET",url,true); xmlhttp.send(null) } } /* This is the AJAX function which validates the terms in each input box. 
It takes as input: 1) the ontology select box id 2) the initial list of ontology terms from user 3) the table where all results should be written 4) whether this is the first call of this function 5) whether this is an export call */ function validateInput(ont_id,ont_term_list,table_id,new_data,export_data) { //if no data is input produce an alert if (ont_term_list.length==0){ alert("Paste some data in the input box!"); return; } //get the list of ontologies using the ontology id ontologies=document.getElementById(ont_id) //get only the selected ontologies and convert to PL/SQL formatted text selected_ont=get_selected(ontologies) //if no ontology is selected produce an alert if (selected_ont==''){ alert("Select at least one Ontology!") return; } //take the pasted terms from user and convert those terms to an array ont_term_array=convert_terms_to_array(ont_term_list); //save this original list of terms from the user into an array original_ont_term_array=ont_term_array; //create an array to store the terms from the input boxes as they are being //modified updated_unique_terms=new Array(); //if this is the first call to this function, create a unique list of terms //build the input boxes if (new_data == 'True') { //original_ont_term_array=new Array(); original_unique_terms=new Array(); //remove old input boxes, so the user can re-use the app over and over clear_inputs(table_id) //generate unique list and input boxes unique_ont_array=write_input_boxes(ont_term_array,table_id); //store unique ontology terms for later use original_unique_terms=unique_ont_array; updated_unique_terms=unique_ont_array; } //If this is not the first call, retrieve values from input boxes else { //get the values from the input boxes unique_ont_array=get_inputs(unique_ont_array); updated_unique_terms=unique_ont_array; } //check if browser can perform xmlhttp xmlhttp=GetXmlHttpObject() if (xmlhttp==null){ alert ("Your browser does not support XML HTTP Request"); return; } /* var url="ontology_validate.psp"; url=url+"?ont_id="+selected_ont+"&ont_terms="+unique_ont_array; url=url+"&sid="+Math.random(); xmlhttp.onreadystatechange=function() { if (xmlhttp.readyState==4) { //since the response from the PL/SQL is a string using the "#' //delimitor, so we need to split and write them to the table validity=xmlhttp.responseText.split('#') for (var i=0; i<validity.length;i++){ //determine if an input value is valid and write 'Invalid' or a //checkbox accordingly if (validity[i]=='Valid' || validity[i]=='Valid\n'){ document.getElementById('validtxtbox'+(i)).innerHTML='&#10003;'; document.getElementById('validtxtbox'+(i)).style.color="green"; }else if (validity[i]=='Invalid' || validity[i]=='Invalid\n'){ document.getElementById('validtxtbox'+(i)).innerHTML=validity[i]; document.getElementById('validtxtbox'+(i)).style.color="red"; } } } } //perform a GET xmlhttp.open("GET",url,true); xmlhttp.send(null) */ //If the data is supposed to be exported, write the data to the new window if (export_data=='True'){ write_data_to_new_window(original_ont_term_array,original_unique_terms,updated_unique_terms); } } function GetXmlHttpObject() { if (window.XMLHttpRequest) { // code for IE7+, Firefox, Chrome, Opera, Safari return new XMLHttpRequest(); } if (window.ActiveXObject) { // code for IE6, IE5 return new ActiveXObject("Microsoft.XMLHTTP"); } return null; } /* This function changes the input box value when the user clicks on a term in the list of terms */ function change_form_value(form_field,form_value,inputbox_id){ //change the input box 
value document.getElementById(form_field).value=form_value; //Clear the list of ontology terms document.getElementById('input'+inputbox_id).innerHTML=''; document.getElementById('input'+inputbox_id).style.border="0px"; //Add a checkmark next to the input box document.getElementById('valid'+form_field).innerHTML='&#10003;'; document.getElementById('valid'+form_field).style.color="green"; } /* when iterating through list of ontology terms, upon onfocus, this changes the background to cyan */ function setStyle(x) { document.getElementById(x).style.background="cyan" } /* when iterating through list of ontology terms, when removing focus (onblur), this changes the background to cyan */ function removeStyle(x) { document.getElementById(x).style.background="white" } /* This function checks to see if all input boxes are valid, updates the original list of terms from the user, with the corrected terms, then calls the function to write the data to the new window */ function write_data_to_new_window(original_ont_term_array, original_unique_terms, updated_unique_terms){ //Determine that all terms are valid for (var i=0;i<original_unique_terms.length;i++){ if (original_unique_terms[i]!=''){ validity=document.getElementById('validtxtbox'+(i)).innerHTML if ( validity=='' || validity=='Click Input Box...')
{ alert('You need to choose valid terms!'); return; }
conditional_block
render.js
(); if (neighboorDistance <= distance) { walk(cells, edges, neighboorCell, distance - neighboorDistance); } } } } window.addEventListener("keyup", function (e) { var char = String.fromCharCode(e.keyCode); if (char == 'R') { newVoronoi(); } else if (char == 'T') { var x = Math.random(), y = Math.random(); var r = 0.05; if (map) { var site = map.voronoi.find(x, y); if (site) { var cell = map.voronoi.cells[site.index]; walk(map.voronoi.cells, map.voronoi.edges, cell, r); mesh.map.geometry.colorsNeedUpdate = true; } } } }); var lastMouse; var cameraTarget = new THREE.Vector3(0, 0, 0); window.addEventListener("mousemove", function (e) { e.preventDefault(); if ( e.buttons == 4 ) { var diff = cameraTarget.clone().sub(camera.position); diff.y = 0; var radious = diff.length(); if (!lastMouse) { lastMouse = { x:e.clientX, y:e.clientY, phi: camera.position.angleTo(new THREE.Vector3(0,0,1)), theta: Math.asin(camera.position.x / radious) }; } var theta = lastMouse.theta + ( ( e.clientY - lastMouse.y ) * Math.PI * 0.01 ); var phi = lastMouse.phi + ( ( e.clientX - lastMouse.x ) * 0.5 ); camera.position.x = radious * Math.sin( theta ); camera.position.z = radious * Math.cos( theta ); camera.lookAt(cameraTarget); camera.updateMatrix(); } else { lastMouse = null; } }); window.addEventListener("wheel", function (e) { var direction = cameraTarget.clone().sub(camera.position); var len = direction.length(); direction.normalize(); camera.position.add(direction.multiplyScalar(e.deltaY * -0.01 * Math.min(1, Math.pow(len, 5)))); }); //var rand = seedrandom("andre", { global: true }); var container = document.body; var scene = new THREE.Scene(); var camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 0.1, 1000 ); camera.position.set( 0, 2, -2 ); camera.lookAt(cameraTarget); scene.add( camera ); var light = new THREE.PointLight( 0xffffff, 0.8 ); camera.add( light ); var grid = new THREE.GridHelper(2, 20); scene.add(grid); var renderer = new THREE.WebGLRenderer( { antialias: true } ); renderer.setClearColor( 0xf0f0f0 ); renderer.setPixelRatio( window.devicePixelRatio ); renderer.setSize( window.innerWidth, window.innerHeight ); container.appendChild( renderer.domElement ); var stats = new Stats(); container.appendChild( stats.dom ); var RENDER = { biome: 0, height: 1, humidity: 2, height_X_humidity: 3 } function Settings() { this.seed = 0; this.sites = 4000; this.showPolygons = false; this.showSites = false; this.flat = false; this.seaLevel = 0.45; this.heightFrequency = 1.2; this.heightScale = 0.3; this.render = "biome"; } var gui = new dat.GUI(); var settings = new Settings(); gui.add(settings, "sites", 0).step(50).onFinishChange(newVoronoi); gui.add(settings, "heightScale", 0, 1).step(0.01); gui.add(settings, "heightFrequency").step(0.01).onFinishChange(newVoronoi); gui.add(settings, "showPolygons").onFinishChange(function(v) { if (mesh) { mesh.border.visible = v; } }); gui.add(settings, "showSites").onFinishChange(function(v) { if (mesh) { mesh.sites.visible = v; } });; gui.add(settings, "flat").onFinishChange(makeFlat); gui.add(settings, "seaLevel", 0, 1).step(0.01).onFinishChange(newVoronoi); gui.add(settings, "render", Object.keys(RENDER)).onFinishChange(changeRender); function changeRender()
function makeFlat(f) { function mf(g) { if (!g.old) { g.old = g.vertices.map(function(v,i) { return v.y; }); } g.vertices.map(function (v, i) { if (f) { v.y = 0; } else { v.y = g.old[i]; } }); g.dynamic = g.verticesNeedUpdate = true; } if (mesh) { mf(mesh.map.geometry); mf(mesh.sites.geometry); mf(mesh.border.geometry); if (!f) { mesh.map.material = mesh.map.originalMaterial; } else { mesh.map.material = new THREE.MeshBasicMaterial( { side: THREE.FrontSide, vertexColors: THREE.FaceColors } ); } } } var width = 1; var height = 1; function createVoronoi(nsites) { var width = 1; var height = 1; var voronoi = d3.voronoi() .extent([[0, 0], [width, height]]); function generateSites() { return d3.range(nsites) .map(function(d) { return [Math.random() * width, Math.random() * height]; }); } function calculateCentroid(pts) { var x = 0; var y = 0; for (var i = 0; i < pts.length; i++) { x += pts[i][0]; y += pts[i][1]; } return [x/pts.length, y/pts.length]; } function loydIteration(sites, iterations) { iterations = iterations || 1; for (var i = 0; i < iterations; i++) { sites = voronoi(sites) .polygons() .map(calculateCentroid); } return sites; } var sites = loydIteration(generateSites(), 1); var diag = voronoi(sites); diag.sites = sites; return diag; } var mesh; var map; function calculateColor(h, x, y) { var color; var seaHeight = settings.heightScale * settings.seaLevel; var nh = h / (settings.heightScale - seaHeight); var p = Math.max(nh, 0.5) / 0xff; var humidity = (noiseGen.raw2D(10+x,10+y) + 1) / 2; var biome = { water: 0x1a3560, scorched: 0x999999, bare: 0xbbbbbb, tundra: 0xddddbb, snow: 0xf8f8f8, taiga: 0xccd4bb, shrubland: 0xc4ccbb, temperateDesert: 0xe4e8ca, temperateRainFlorest: 0xa4c4a8, temperateDeciduousFlorest: 0xb4c9a9, grassland: 0xc4d4aa, tropicalRainForest: 0x9cbba9, tropicalSeasonalForest: 0xa9cca4, subtropicalDesert: 0xe9ddc7 }; // first dimension is height, second is humidity var colorMap = [ // very dry, dry damp wet very wet drenched [ biome.subtropicalDesert, biome.grassland, biome.tropicalSeasonalForest, biome.tropicalRainForest, biome.tropicalRainForest, biome.tropicalRainForest], // height level 1 [ biome.temperateDesert, biome.grassland, biome.grassland, biome.temperateDeciduousFlorest, biome.temperateDeciduousFlorest, biome.temperateRainFlorest], // height level 2 [ biome.temperateDesert, biome.temperateDesert, biome.shrubland, biome.shrubland, biome.taiga, biome.snow], // height level 3 [ biome.scorched, biome.bare, biome.tundra, biome.snow, biome.snow, biome.snow]
{ if (!map) { return; } var cells = map.voronoi.cells; for (var i = 0; i < cells.length; i++) { var cell = cells[i]; var faces = cell.faces; for (var j = 0; j < faces.length; j++) { var f = faces[j]; var c = f.color; c.setHex(calculateColor(cell.height, cell.site.data[0], cell.site.data[1]).getHex()); } } mesh.map.geometry.dynamic = true; mesh.map.geometry.colorsNeedUpdate = true; }
identifier_body
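calculateColor above picks a biome color from a two-dimensional table indexed by a height band and a humidity band. A compact sketch of that lookup follows; the hex values are copied from the JS table, while the bucketing of normalized height and humidity into 4 and 6 bands is an illustrative simplification, not the exact thresholds the renderer uses.

// Sketch only: hex values copied from the JS table; the 4x6 bucketing is an illustrative guess.
package main

import "fmt"

var colorMap = [4][6]int{
	{0xe9ddc7, 0xc4d4aa, 0xa9cca4, 0x9cbba9, 0x9cbba9, 0x9cbba9}, // lowest land band
	{0xe4e8ca, 0xc4d4aa, 0xc4d4aa, 0xb4c9a9, 0xb4c9a9, 0xa4c4a8},
	{0xe4e8ca, 0xe4e8ca, 0xc4ccbb, 0xc4ccbb, 0xccd4bb, 0xf8f8f8},
	{0x999999, 0xbbbbbb, 0xddddbb, 0xf8f8f8, 0xf8f8f8, 0xf8f8f8}, // highest land band
}

// biome maps normalized height and humidity (both in [0,1]) to a color,
// bucketing height into 4 bands and humidity into 6, as the table expects.
func biome(height, humidity float64) int {
	h := int(height * 4)
	if h > 3 {
		h = 3
	}
	m := int(humidity * 6)
	if m > 5 {
		m = 5
	}
	return colorMap[h][m]
}

func main() {
	fmt.Printf("#%06x\n", biome(0.1, 0.9)) // low and wet: tropical rain forest (#9cbba9)
}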
render.js
length(); if (neighboorDistance <= distance) { walk(cells, edges, neighboorCell, distance - neighboorDistance); } } } } window.addEventListener("keyup", function (e) { var char = String.fromCharCode(e.keyCode); if (char == 'R') { newVoronoi(); } else if (char == 'T') { var x = Math.random(), y = Math.random(); var r = 0.05; if (map) { var site = map.voronoi.find(x, y); if (site) { var cell = map.voronoi.cells[site.index]; walk(map.voronoi.cells, map.voronoi.edges, cell, r); mesh.map.geometry.colorsNeedUpdate = true; } } } }); var lastMouse; var cameraTarget = new THREE.Vector3(0, 0, 0); window.addEventListener("mousemove", function (e) { e.preventDefault(); if ( e.buttons == 4 ) { var diff = cameraTarget.clone().sub(camera.position); diff.y = 0; var radious = diff.length(); if (!lastMouse) { lastMouse = { x:e.clientX, y:e.clientY, phi: camera.position.angleTo(new THREE.Vector3(0,0,1)), theta: Math.asin(camera.position.x / radious) }; } var theta = lastMouse.theta + ( ( e.clientY - lastMouse.y ) * Math.PI * 0.01 ); var phi = lastMouse.phi + ( ( e.clientX - lastMouse.x ) * 0.5 ); camera.position.x = radious * Math.sin( theta ); camera.position.z = radious * Math.cos( theta ); camera.lookAt(cameraTarget); camera.updateMatrix(); } else { lastMouse = null; } }); window.addEventListener("wheel", function (e) { var direction = cameraTarget.clone().sub(camera.position); var len = direction.length(); direction.normalize(); camera.position.add(direction.multiplyScalar(e.deltaY * -0.01 * Math.min(1, Math.pow(len, 5)))); }); //var rand = seedrandom("andre", { global: true }); var container = document.body; var scene = new THREE.Scene(); var camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 0.1, 1000 ); camera.position.set( 0, 2, -2 ); camera.lookAt(cameraTarget); scene.add( camera ); var light = new THREE.PointLight( 0xffffff, 0.8 ); camera.add( light ); var grid = new THREE.GridHelper(2, 20); scene.add(grid); var renderer = new THREE.WebGLRenderer( { antialias: true } ); renderer.setClearColor( 0xf0f0f0 ); renderer.setPixelRatio( window.devicePixelRatio ); renderer.setSize( window.innerWidth, window.innerHeight ); container.appendChild( renderer.domElement ); var stats = new Stats(); container.appendChild( stats.dom ); var RENDER = { biome: 0, height: 1, humidity: 2, height_X_humidity: 3 } function Settings() { this.seed = 0; this.sites = 4000; this.showPolygons = false; this.showSites = false;
this.seaLevel = 0.45; this.heightFrequency = 1.2; this.heightScale = 0.3; this.render = "biome"; } var gui = new dat.GUI(); var settings = new Settings(); gui.add(settings, "sites", 0).step(50).onFinishChange(newVoronoi); gui.add(settings, "heightScale", 0, 1).step(0.01); gui.add(settings, "heightFrequency").step(0.01).onFinishChange(newVoronoi); gui.add(settings, "showPolygons").onFinishChange(function(v) { if (mesh) { mesh.border.visible = v; } }); gui.add(settings, "showSites").onFinishChange(function(v) { if (mesh) { mesh.sites.visible = v; } });; gui.add(settings, "flat").onFinishChange(makeFlat); gui.add(settings, "seaLevel", 0, 1).step(0.01).onFinishChange(newVoronoi); gui.add(settings, "render", Object.keys(RENDER)).onFinishChange(changeRender); function changeRender() { if (!map) { return; } var cells = map.voronoi.cells; for (var i = 0; i < cells.length; i++) { var cell = cells[i]; var faces = cell.faces; for (var j = 0; j < faces.length; j++) { var f = faces[j]; var c = f.color; c.setHex(calculateColor(cell.height, cell.site.data[0], cell.site.data[1]).getHex()); } } mesh.map.geometry.dynamic = true; mesh.map.geometry.colorsNeedUpdate = true; } function makeFlat(f) { function mf(g) { if (!g.old) { g.old = g.vertices.map(function(v,i) { return v.y; }); } g.vertices.map(function (v, i) { if (f) { v.y = 0; } else { v.y = g.old[i]; } }); g.dynamic = g.verticesNeedUpdate = true; } if (mesh) { mf(mesh.map.geometry); mf(mesh.sites.geometry); mf(mesh.border.geometry); if (!f) { mesh.map.material = mesh.map.originalMaterial; } else { mesh.map.material = new THREE.MeshBasicMaterial( { side: THREE.FrontSide, vertexColors: THREE.FaceColors } ); } } } var width = 1; var height = 1; function createVoronoi(nsites) { var width = 1; var height = 1; var voronoi = d3.voronoi() .extent([[0, 0], [width, height]]); function generateSites() { return d3.range(nsites) .map(function(d) { return [Math.random() * width, Math.random() * height]; }); } function calculateCentroid(pts) { var x = 0; var y = 0; for (var i = 0; i < pts.length; i++) { x += pts[i][0]; y += pts[i][1]; } return [x/pts.length, y/pts.length]; } function loydIteration(sites, iterations) { iterations = iterations || 1; for (var i = 0; i < iterations; i++) { sites = voronoi(sites) .polygons() .map(calculateCentroid); } return sites; } var sites = loydIteration(generateSites(), 1); var diag = voronoi(sites); diag.sites = sites; return diag; } var mesh; var map; function calculateColor(h, x, y) { var color; var seaHeight = settings.heightScale * settings.seaLevel; var nh = h / (settings.heightScale - seaHeight); var p = Math.max(nh, 0.5) / 0xff; var humidity = (noiseGen.raw2D(10+x,10+y) + 1) / 2; var biome = { water: 0x1a3560, scorched: 0x999999, bare: 0xbbbbbb, tundra: 0xddddbb, snow: 0xf8f8f8, taiga: 0xccd4bb, shrubland: 0xc4ccbb, temperateDesert: 0xe4e8ca, temperateRainFlorest: 0xa4c4a8, temperateDeciduousFlorest: 0xb4c9a9, grassland: 0xc4d4aa, tropicalRainForest: 0x9cbba9, tropicalSeasonalForest: 0xa9cca4, subtropicalDesert: 0xe9ddc7 }; // first dimension is height, second is humidity var colorMap = [ // very dry, dry damp wet very wet drenched [ biome.subtropicalDesert, biome.grassland, biome.tropicalSeasonalForest, biome.tropicalRainForest, biome.tropicalRainForest, biome.tropicalRainForest], // height level 1 [ biome.temperateDesert, biome.grassland, biome.grassland, biome.temperateDeciduousFlorest, biome.temperateDeciduousFlorest, biome.temperateRainFlorest], // height level 2 [ biome.temperateDesert, biome.temperateDesert, 
biome.shrubland, biome.shrubland, biome.taiga, biome.snow], // height level 3 [ biome.scorched, biome.bare, biome.tundra, biome.snow, biome.snow, biome.snow] //
this.flat = false;
random_line_split
render.js
(); if (neighboorDistance <= distance) { walk(cells, edges, neighboorCell, distance - neighboorDistance); } } } } window.addEventListener("keyup", function (e) { var char = String.fromCharCode(e.keyCode); if (char == 'R') { newVoronoi(); } else if (char == 'T') { var x = Math.random(), y = Math.random(); var r = 0.05; if (map) { var site = map.voronoi.find(x, y); if (site) { var cell = map.voronoi.cells[site.index]; walk(map.voronoi.cells, map.voronoi.edges, cell, r); mesh.map.geometry.colorsNeedUpdate = true; } } } }); var lastMouse; var cameraTarget = new THREE.Vector3(0, 0, 0); window.addEventListener("mousemove", function (e) { e.preventDefault(); if ( e.buttons == 4 ) { var diff = cameraTarget.clone().sub(camera.position); diff.y = 0; var radious = diff.length(); if (!lastMouse) { lastMouse = { x:e.clientX, y:e.clientY, phi: camera.position.angleTo(new THREE.Vector3(0,0,1)), theta: Math.asin(camera.position.x / radious) }; } var theta = lastMouse.theta + ( ( e.clientY - lastMouse.y ) * Math.PI * 0.01 ); var phi = lastMouse.phi + ( ( e.clientX - lastMouse.x ) * 0.5 ); camera.position.x = radious * Math.sin( theta ); camera.position.z = radious * Math.cos( theta ); camera.lookAt(cameraTarget); camera.updateMatrix(); } else { lastMouse = null; } }); window.addEventListener("wheel", function (e) { var direction = cameraTarget.clone().sub(camera.position); var len = direction.length(); direction.normalize(); camera.position.add(direction.multiplyScalar(e.deltaY * -0.01 * Math.min(1, Math.pow(len, 5)))); }); //var rand = seedrandom("andre", { global: true }); var container = document.body; var scene = new THREE.Scene(); var camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 0.1, 1000 ); camera.position.set( 0, 2, -2 ); camera.lookAt(cameraTarget); scene.add( camera ); var light = new THREE.PointLight( 0xffffff, 0.8 ); camera.add( light ); var grid = new THREE.GridHelper(2, 20); scene.add(grid); var renderer = new THREE.WebGLRenderer( { antialias: true } ); renderer.setClearColor( 0xf0f0f0 ); renderer.setPixelRatio( window.devicePixelRatio ); renderer.setSize( window.innerWidth, window.innerHeight ); container.appendChild( renderer.domElement ); var stats = new Stats(); container.appendChild( stats.dom ); var RENDER = { biome: 0, height: 1, humidity: 2, height_X_humidity: 3 } function Settings() { this.seed = 0; this.sites = 4000; this.showPolygons = false; this.showSites = false; this.flat = false; this.seaLevel = 0.45; this.heightFrequency = 1.2; this.heightScale = 0.3; this.render = "biome"; } var gui = new dat.GUI(); var settings = new Settings(); gui.add(settings, "sites", 0).step(50).onFinishChange(newVoronoi); gui.add(settings, "heightScale", 0, 1).step(0.01); gui.add(settings, "heightFrequency").step(0.01).onFinishChange(newVoronoi); gui.add(settings, "showPolygons").onFinishChange(function(v) { if (mesh) { mesh.border.visible = v; } }); gui.add(settings, "showSites").onFinishChange(function(v) { if (mesh) { mesh.sites.visible = v; } });; gui.add(settings, "flat").onFinishChange(makeFlat); gui.add(settings, "seaLevel", 0, 1).step(0.01).onFinishChange(newVoronoi); gui.add(settings, "render", Object.keys(RENDER)).onFinishChange(changeRender); function changeRender() { if (!map) { return; } var cells = map.voronoi.cells; for (var i = 0; i < cells.length; i++) { var cell = cells[i]; var faces = cell.faces; for (var j = 0; j < faces.length; j++) { var f = faces[j]; var c = f.color; c.setHex(calculateColor(cell.height, cell.site.data[0], 
cell.site.data[1]).getHex()); } } mesh.map.geometry.dynamic = true; mesh.map.geometry.colorsNeedUpdate = true; } function
(f) { function mf(g) { if (!g.old) { g.old = g.vertices.map(function(v,i) { return v.y; }); } g.vertices.map(function (v, i) { if (f) { v.y = 0; } else { v.y = g.old[i]; } }); g.dynamic = g.verticesNeedUpdate = true; } if (mesh) { mf(mesh.map.geometry); mf(mesh.sites.geometry); mf(mesh.border.geometry); if (!f) { mesh.map.material = mesh.map.originalMaterial; } else { mesh.map.material = new THREE.MeshBasicMaterial( { side: THREE.FrontSide, vertexColors: THREE.FaceColors } ); } } } var width = 1; var height = 1; function createVoronoi(nsites) { var width = 1; var height = 1; var voronoi = d3.voronoi() .extent([[0, 0], [width, height]]); function generateSites() { return d3.range(nsites) .map(function(d) { return [Math.random() * width, Math.random() * height]; }); } function calculateCentroid(pts) { var x = 0; var y = 0; for (var i = 0; i < pts.length; i++) { x += pts[i][0]; y += pts[i][1]; } return [x/pts.length, y/pts.length]; } function loydIteration(sites, iterations) { iterations = iterations || 1; for (var i = 0; i < iterations; i++) { sites = voronoi(sites) .polygons() .map(calculateCentroid); } return sites; } var sites = loydIteration(generateSites(), 1); var diag = voronoi(sites); diag.sites = sites; return diag; } var mesh; var map; function calculateColor(h, x, y) { var color; var seaHeight = settings.heightScale * settings.seaLevel; var nh = h / (settings.heightScale - seaHeight); var p = Math.max(nh, 0.5) / 0xff; var humidity = (noiseGen.raw2D(10+x,10+y) + 1) / 2; var biome = { water: 0x1a3560, scorched: 0x999999, bare: 0xbbbbbb, tundra: 0xddddbb, snow: 0xf8f8f8, taiga: 0xccd4bb, shrubland: 0xc4ccbb, temperateDesert: 0xe4e8ca, temperateRainFlorest: 0xa4c4a8, temperateDeciduousFlorest: 0xb4c9a9, grassland: 0xc4d4aa, tropicalRainForest: 0x9cbba9, tropicalSeasonalForest: 0xa9cca4, subtropicalDesert: 0xe9ddc7 }; // first dimension is height, second is humidity var colorMap = [ // very dry, dry damp wet very wet drenched [ biome.subtropicalDesert, biome.grassland, biome.tropicalSeasonalForest, biome.tropicalRainForest, biome.tropicalRainForest, biome.tropicalRainForest], // height level 1 [ biome.temperateDesert, biome.grassland, biome.grassland, biome.temperateDeciduousFlorest, biome.temperateDeciduousFlorest, biome.temperateRainFlorest], // height level 2 [ biome.temperateDesert, biome.temperateDesert, biome.shrubland, biome.shrubland, biome.taiga, biome.snow], // height level 3 [ biome.scorched, biome.bare, biome.tundra, biome.snow, biome.snow, biome.snow] //
makeFlat
identifier_name
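The render.js sample above is collapsed onto a few long lines, which obscures the one genuinely interesting piece of logic in calculateColor: height and humidity are each reduced to a discrete band, and the band pair indexes a two-dimensional biome table. Below is a minimal Python sketch of that table lookup; the band boundaries, table size, and biome names are simplified illustrations of the idea, not the exact values used in render.js.

# Sketch of a height/humidity -> biome lookup, modelled on the colorMap
# table in render.js. Thresholds and names are illustrative assumptions.
BIOME_TABLE = [
    # very dry            dry          damp                wet
    ["subtropical_desert", "grassland", "seasonal_forest",  "rain_forest"],  # low
    ["temperate_desert",   "grassland", "deciduous_forest", "rain_forest"],  # mid
    ["temperate_desert",   "shrubland", "taiga",            "snow"],         # high
    ["scorched",           "bare",      "tundra",           "snow"],         # peak
]

def pick_biome(height, humidity, sea_level=0.45):
    """Map normalised height/humidity in [0, 1] to a biome name."""
    if height <= sea_level:
        return "water"
    # Rescale land height above sea level back into [0, 1] before binning.
    land = (height - sea_level) / (1.0 - sea_level)
    row = min(int(land * len(BIOME_TABLE)), len(BIOME_TABLE) - 1)
    col = min(int(humidity * len(BIOME_TABLE[0])), len(BIOME_TABLE[0]) - 1)
    return BIOME_TABLE[row][col]

if __name__ == "__main__":
    print(pick_biome(0.30, 0.5))   # water
    print(pick_biome(0.60, 0.9))   # a wet, low-altitude biome
    print(pick_biome(0.95, 0.1))   # a dry, high-altitude biome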
build_all.py
") as f: data = json.loads(strip_comments(f.read().decode("utf-8"))) value = data for part in json_path.split("."): if part in value: value = value[part] else: raise ValueError("'$.{}' not found in {}".format(json_path, file_path)) return value def get_dockerfile_md5(dockerfile_path): """ Get md5 check sum of a dockerfile, comments, empty line, tailing space are ignored. :param dockerfile_path: the absolute path of the Dockerfile :rtype: str """ valid_lines = list() with open(dockerfile_path, "rb") as f: lines = f.read().decode("utf-8").split("\n") for line in lines: line = line.rstrip() # ignore comment line if line.startswith("#"): continue # ignore empty line if not bool(line): continue # trim tailing comment if "#" in line: line = line[:-(line[::-1].index("#") + 1)].rstrip() if line: valid_lines.append(line) else: valid_lines.append(line) md5 = hashlib.md5() md5.update("\n".join(valid_lines).encode("utf-8")) return md5.hexdigest() DIR_HERE = os.path.abspath(os.path.dirname(__file__)) DIR_PROJECT_ROOT = DIR_HERE DIR_CICD = DIR_HERE DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos") # config file path PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"] PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json") DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"] IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) # --- Load Configs --- # detect runtime class Runtime: local = "local" circleci = "circleci" if os.environ.get("CIRCLECI"): runtime = Runtime.circleci else: runtime = Runtime.local # resolve config if runtime == Runtime.local: AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION") AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE") # set environment variable, allow pynamodb to detect credential os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE os.environ["AWS_DEFAULT_REGION"] = AWS_REGION try: DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD") except: DOCKER_HUB_PASSWORD = "" GIT_BRANCH = "" elif runtime == Runtime.circleci: AWS_REGION = os.environ["AWS_DEFAULT_REGION"] AWS_PROFILE = None DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"] GIT_BRANCH = os.environ["CIRCLE_BRANCH"] else: raise NotImplementedError logger = create_logger() docker_client = docker.from_env() # --- Image State DynamoDB backend class ImageModel(Model): class Meta: table_name = "docker-image-state" region = AWS_REGION identifier = UnicodeAttribute(hash_key=True) md5 = UnicodeAttribute() last_update = UnicodeAttribute() @property def last_update_datetime(self): """ datetime type of ``last_update`` """ return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f") dockerhub_username = DOCKER_HUB_USERNAME # type: str dir_repo_root = None # type: str dir_tag_root = None # type: str is_state_exists = None # type: bool _repo_name = None # type: str @property def repo_name(self): if self._repo_name is None: self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip() return self._repo_name _tag_name = None # type: str @property def tag_name(self): if self._tag_name is None: self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip() return self._tag_name @property def dockerfile_path(self): return os.path.join(self.dir_tag_root, "Dockerfile") def has_dockerfile(self): return 
os.path.exists(self.dockerfile_path) _dockerfile_md5 = None # type: str @property def dockerfile_md5(self): if self._dockerfile_md5 is None: self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path) return self._dockerfile_md5 @property def local_identifier(self): return f"{self.repo_name}:{self.tag_name}" @property def dockerhub_identifier(self): return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}" @property def awsecr_identifier(self): raise NotImplementedError @property def smoke_test_script_path(self): return os.path.join(self.dir_tag_root, "smoke-test.sh") def run_docker_build(self): """ :rtype: bool :return: """ logger.info(f"Build docker image in context at {self.dir_tag_root} ...") try: run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root]) self.last_update = str(datetime.utcnow()) logger.info(" Build success!") return True except subprocess.CalledProcessError as e: logger.info(" Build failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_smoke_test(self): """ :rtype: bool :return: """ logger.info(f"Run smoke test script {self.smoke_test_script_path}...") try: run_and_log_command(["bash", self.smoke_test_script_path]) logger.info(" Test passed!") return True except subprocess.CalledProcessError as e: logger.info(" Test failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_docker_push(self, docker_client): logger.info(f"Push docker image {self.identifier} ...") if REGISTRY_SERVICE == "dockerhub": remote_identifier = self.dockerhub_identifier elif REGISTRY_SERVICE == "awsecr": remote_identifier = self.awsecr_identifier else: raise ValueError try: run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier]) docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name) logger.info(" Success!") if self.is_state_exists: self.update( actions=[ ImageModel.md5.set(self.md5), ImageModel.last_update.set(self.last_update) ] ) else: self.save() return True except subprocess.CalledProcessError as e: logger.info(" Push failed!") logger.info(" {}".format(e)) return False except Exception as e: logger.info(" {}".format(e)) return False ImageModel.create_table(billing_mode="PAY_PER_REQUEST") def plan_image_to_build(): """ :rtype: typing.List[ImageModel] :return: """ logger.info("Scan code repo to scheduler docker build ...") image_list = list() for repo_folder in os.listdir(DIR_REPOS): dir_repo_root = os.path.join(DIR_REPOS, repo_folder) if not os.path.isdir(dir_repo_root): continue for tag_folder in os.listdir(dir_repo_root): dir_tag_root = os.path.join(dir_repo_root, tag_folder) image = ImageModel() image.dir_repo_root = dir_repo_root image.dir_tag_root = dir_tag_root if not image.has_dockerfile(): continue logger.info(f" Detected '{image.local_identifier}' image") try: _image = ImageModel.get(image.local_identifier) # type: ImageModel image.identifier = _image.identifier image.md5 = _image.md5 image.last_update = _image.last_update _image.is_state_exists = True if image.md5 == image.dockerfile_md5: if (datetime.utcnow() - image.last_update_datetime).total_seconds() > IMAGE_REBUILD_INTERVAL: is_todo = True logger.info( " Dockerfile not changed, but it is out dated " "due to the IMAGE_REBUILD_INTERVAL setting, we need to build this one") else: is_todo = False logger.info(" Dockerfile not changed, and not beyond the IMAGE_REBUILD_INTERVAL setting") logger.info(" skip this image") else: is_todo = True logger.info(" Dockerfile has 
changed, we need to rebuild the image") except DoesNotExist: logger.info(" State not exists, we need to build this one") is_todo = True image.identifier = image.local_identifier image.md5 = image.dockerfile_md5 image.is_state_exists = False except Exception as e: raise e if is_todo:
image_list.append(image)
conditional_block
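The build_all.py sample above centres on get_dockerfile_md5: a Dockerfile is normalised (full-line comments, blank lines, and trailing comments/whitespace are dropped) before hashing, so cosmetic edits do not invalidate the cached image state in DynamoDB. A self-contained sketch of that normalisation follows; the helper name and the sample Dockerfile strings used in the assert are mine, not taken from the original.

import hashlib

def normalized_dockerfile_md5(text):
    # Mirrors get_dockerfile_md5: full-line comments and blank lines are
    # skipped, trailing comments and trailing whitespace are stripped, and
    # the md5 is taken over what remains, so cosmetic edits hash the same.
    kept = []
    for line in text.split("\n"):
        line = line.rstrip()
        if not line or line.startswith("#"):
            continue
        if "#" in line:
            line = line[: line.rindex("#")].rstrip()
        kept.append(line)
    return hashlib.md5("\n".join(kept).encode("utf-8")).hexdigest()

# Two Dockerfiles that differ only in comments and blank lines hash identically.
a = "FROM python:3.11\nRUN pip install requests  # HTTP client\n"
b = "# base image\nFROM python:3.11\n\nRUN pip install requests\n"
assert normalized_dockerfile_md5(a) == normalized_dockerfile_md5(b)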
build_all.py
file_path = os.path.abspath(os.path.join(cwd, file_path)) # fix json_path if json_path.startswith("$."): json_path = json_path.replace("$.", "", 1) with open(file_path, "rb") as f: data = json.loads(strip_comments(f.read().decode("utf-8"))) value = data for part in json_path.split("."): if part in value: value = value[part] else: raise ValueError("'$.{}' not found in {}".format(json_path, file_path)) return value def get_dockerfile_md5(dockerfile_path): """ Get md5 check sum of a dockerfile, comments, empty line, tailing space are ignored. :param dockerfile_path: the absolute path of the Dockerfile :rtype: str """ valid_lines = list() with open(dockerfile_path, "rb") as f: lines = f.read().decode("utf-8").split("\n") for line in lines: line = line.rstrip() # ignore comment line if line.startswith("#"): continue # ignore empty line if not bool(line): continue # trim tailing comment if "#" in line: line = line[:-(line[::-1].index("#") + 1)].rstrip() if line: valid_lines.append(line) else: valid_lines.append(line) md5 = hashlib.md5() md5.update("\n".join(valid_lines).encode("utf-8")) return md5.hexdigest() DIR_HERE = os.path.abspath(os.path.dirname(__file__)) DIR_PROJECT_ROOT = DIR_HERE DIR_CICD = DIR_HERE DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos") # config file path PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"] PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json") DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"] IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) # --- Load Configs --- # detect runtime class Runtime: local = "local" circleci = "circleci" if os.environ.get("CIRCLECI"): runtime = Runtime.circleci else: runtime = Runtime.local # resolve config if runtime == Runtime.local: AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION") AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE") # set environment variable, allow pynamodb to detect credential os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE os.environ["AWS_DEFAULT_REGION"] = AWS_REGION try: DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD") except: DOCKER_HUB_PASSWORD = "" GIT_BRANCH = "" elif runtime == Runtime.circleci: AWS_REGION = os.environ["AWS_DEFAULT_REGION"] AWS_PROFILE = None DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"] GIT_BRANCH = os.environ["CIRCLE_BRANCH"] else: raise NotImplementedError logger = create_logger() docker_client = docker.from_env() # --- Image State DynamoDB backend class ImageModel(Model): class Meta: table_name = "docker-image-state" region = AWS_REGION identifier = UnicodeAttribute(hash_key=True) md5 = UnicodeAttribute() last_update = UnicodeAttribute() @property def last_update_datetime(self): """ datetime type of ``last_update`` """ return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f") dockerhub_username = DOCKER_HUB_USERNAME # type: str dir_repo_root = None # type: str dir_tag_root = None # type: str is_state_exists = None # type: bool _repo_name = None # type: str @property def repo_name(self): if self._repo_name is None: self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip() return self._repo_name _tag_name = None # type: str @property def tag_name(self): if self._tag_name is None: self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip() return 
self._tag_name @property def dockerfile_path(self): return os.path.join(self.dir_tag_root, "Dockerfile") def has_dockerfile(self): return os.path.exists(self.dockerfile_path) _dockerfile_md5 = None # type: str @property def dockerfile_md5(self): if self._dockerfile_md5 is None: self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path) return self._dockerfile_md5 @property def local_identifier(self): return f"{self.repo_name}:{self.tag_name}" @property def dockerhub_identifier(self): return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}" @property def awsecr_identifier(self): raise NotImplementedError @property def smoke_test_script_path(self): return os.path.join(self.dir_tag_root, "smoke-test.sh") def run_docker_build(self): """ :rtype: bool :return: """ logger.info(f"Build docker image in context at {self.dir_tag_root} ...") try: run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root]) self.last_update = str(datetime.utcnow()) logger.info(" Build success!") return True except subprocess.CalledProcessError as e: logger.info(" Build failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_smoke_test(self): """ :rtype: bool :return: """ logger.info(f"Run smoke test script {self.smoke_test_script_path}...") try: run_and_log_command(["bash", self.smoke_test_script_path]) logger.info(" Test passed!") return True except subprocess.CalledProcessError as e: logger.info(" Test failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_docker_push(self, docker_client): logger.info(f"Push docker image {self.identifier} ...") if REGISTRY_SERVICE == "dockerhub": remote_identifier = self.dockerhub_identifier elif REGISTRY_SERVICE == "awsecr": remote_identifier = self.awsecr_identifier else: raise ValueError try: run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier]) docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name) logger.info(" Success!") if self.is_state_exists: self.update( actions=[ ImageModel.md5.set(self.md5), ImageModel.last_update.set(self.last_update) ] ) else: self.save() return True except subprocess.CalledProcessError as e: logger.info(" Push failed!") logger.info(" {}".format(e)) return False except Exception as e: logger.info(" {}".format(e)) return False ImageModel.create_table(billing_mode="PAY_PER_REQUEST") def plan_image_to_build(): """ :rtype: typing.List[ImageModel] :return: """ logger.info("Scan code repo to scheduler docker build ...") image_list = list() for repo_folder in os.listdir(DIR_REPOS): dir_repo_root = os.path.join(DIR_REPOS, repo_folder) if not os.path.isdir(dir_repo_root): continue for tag_folder in os.listdir(dir_repo_root): dir_tag_root = os.path.join(dir_repo_root, tag_folder) image = ImageModel() image.dir_repo_root = dir_repo_root image.dir_tag_root = dir_tag_root if not image.has_dockerfile(): continue logger.info(f" Detected '{image.local_identifier}' image") try: _image = ImageModel.get(image.local_identifier) # type: ImageModel image.identifier = _image.identifier image.md5 = _image.md5 image.last_update = _image.last_update _image.is_state_exists = True if image.md5 == image.dockerfile_md5: if (datetime.utcnow() - image.last_update_datetime).total_seconds() > IMAGE_REBUILD_INTERVAL: is_todo = True logger.info( " Dockerfile not changed, but it is out dated " "due to the IMAGE_REBUILD_INTERVAL setting, we need to build this one") else: is_todo = False logger.info(" Dockerfile not changed, 
and not beyond the IMAGE_REBUILD_INTERVAL setting") logger.info(" skip this image") else: is_todo = True logger.info(" Dockerfile has changed, we need to rebuild the image") except DoesNotExist: logger.info(" State not exists, we need to build this one") is_todo = True
random_line_split
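This second build_all.py sample exposes the scheduling decision inside plan_image_to_build: an image is rebuilt when there is no stored state, when the Dockerfile md5 has changed, or when the md5 is unchanged but the last build is older than IMAGE_REBUILD_INTERVAL. A small sketch of that decision as a pure function; the function name and argument shapes are mine, not from the original.

from datetime import datetime, timedelta

def needs_rebuild(stored_md5, stored_last_update, current_md5,
                  rebuild_interval_seconds, now=None):
    """Decide whether an image must be rebuilt, mirroring plan_image_to_build:
    missing state, a changed Dockerfile md5, or an unchanged md5 that is older
    than the rebuild interval all trigger a build."""
    now = now or datetime.utcnow()
    if stored_md5 is None:            # no DynamoDB state yet -> build
        return True
    if stored_md5 != current_md5:     # Dockerfile changed -> build
        return True
    age = (now - stored_last_update).total_seconds()
    return age > rebuild_interval_seconds   # periodic refresh

now = datetime.utcnow()
assert needs_rebuild(None, None, "abc", 86400, now)
assert needs_rebuild("abc", now - timedelta(days=2), "abc", 86400, now)
assert not needs_rebuild("abc", now - timedelta(hours=1), "abc", 86400, now)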
build_all.py
(string, comment_symbols=frozenset(('#', '//'))): """ Strip comments from json string. :param string: A string containing json with comments started by comment_symbols. :param comment_symbols: Iterable of symbols that start a line comment (default # or //). :return: The string with the comments removed. """ lines = string.splitlines() for k in range(len(lines)): for symbol in comment_symbols: lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol) return '\n'.join(lines) def read_json(file_path): """ Read dict data from json file :type file_path: str :rtype: dict """ return json.loads(strip_comments(read_text(file_path))) def get_json_value(file_path, json_path): """ Read specific field from JSON file. :type file_path: str :param file_path: the absolute path for a json file :type json_path: str :param json_path: json path notation. """ # find absolute path cwd = os.getcwd() if not os.path.isabs(file_path): file_path = os.path.abspath(os.path.join(cwd, file_path)) # fix json_path if json_path.startswith("$."): json_path = json_path.replace("$.", "", 1) with open(file_path, "rb") as f: data = json.loads(strip_comments(f.read().decode("utf-8"))) value = data for part in json_path.split("."): if part in value: value = value[part] else: raise ValueError("'$.{}' not found in {}".format(json_path, file_path)) return value def get_dockerfile_md5(dockerfile_path): """ Get md5 check sum of a dockerfile, comments, empty line, tailing space are ignored. :param dockerfile_path: the absolute path of the Dockerfile :rtype: str """ valid_lines = list() with open(dockerfile_path, "rb") as f: lines = f.read().decode("utf-8").split("\n") for line in lines: line = line.rstrip() # ignore comment line if line.startswith("#"): continue # ignore empty line if not bool(line): continue # trim tailing comment if "#" in line: line = line[:-(line[::-1].index("#") + 1)].rstrip() if line: valid_lines.append(line) else: valid_lines.append(line) md5 = hashlib.md5() md5.update("\n".join(valid_lines).encode("utf-8")) return md5.hexdigest() DIR_HERE = os.path.abspath(os.path.dirname(__file__)) DIR_PROJECT_ROOT = DIR_HERE DIR_CICD = DIR_HERE DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos") # config file path PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"] PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json") DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"] IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) # --- Load Configs --- # detect runtime class Runtime: local = "local" circleci = "circleci" if os.environ.get("CIRCLECI"): runtime = Runtime.circleci else: runtime = Runtime.local # resolve config if runtime == Runtime.local: AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION") AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE") # set environment variable, allow pynamodb to detect credential os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE os.environ["AWS_DEFAULT_REGION"] = AWS_REGION try: DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD") except: DOCKER_HUB_PASSWORD = "" GIT_BRANCH = "" elif runtime == Runtime.circleci: AWS_REGION = os.environ["AWS_DEFAULT_REGION"] AWS_PROFILE = None DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"] GIT_BRANCH = os.environ["CIRCLE_BRANCH"] else: raise NotImplementedError logger = create_logger() 
docker_client = docker.from_env() # --- Image State DynamoDB backend class ImageModel(Model): class Meta: table_name = "docker-image-state" region = AWS_REGION identifier = UnicodeAttribute(hash_key=True) md5 = UnicodeAttribute() last_update = UnicodeAttribute() @property def last_update_datetime(self): """ datetime type of ``last_update`` """ return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f") dockerhub_username = DOCKER_HUB_USERNAME # type: str dir_repo_root = None # type: str dir_tag_root = None # type: str is_state_exists = None # type: bool _repo_name = None # type: str @property def repo_name(self): if self._repo_name is None: self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip() return self._repo_name _tag_name = None # type: str @property def tag_name(self): if self._tag_name is None: self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip() return self._tag_name @property def dockerfile_path(self): return os.path.join(self.dir_tag_root, "Dockerfile") def has_dockerfile(self): return os.path.exists(self.dockerfile_path) _dockerfile_md5 = None # type: str @property def dockerfile_md5(self): if self._dockerfile_md5 is None: self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path) return self._dockerfile_md5 @property def local_identifier(self): return f"{self.repo_name}:{self.tag_name}" @property def dockerhub_identifier(self): return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}" @property def awsecr_identifier(self): raise NotImplementedError @property def smoke_test_script_path(self): return os.path.join(self.dir_tag_root, "smoke-test.sh") def run_docker_build(self): """ :rtype: bool :return: """ logger.info(f"Build docker image in context at {self.dir_tag_root} ...") try: run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root]) self.last_update = str(datetime.utcnow()) logger.info(" Build success!") return True except subprocess.CalledProcessError as e: logger.info(" Build failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_smoke_test(self): """ :rtype: bool :return: """ logger.info(f"Run smoke test script {self.smoke_test_script_path}...") try: run_and_log_command(["bash", self.smoke_test_script_path]) logger.info(" Test passed!") return True except subprocess.CalledProcessError as e: logger.info(" Test failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_docker_push(self, docker_client): logger.info(f"Push docker image {self.identifier} ...") if REGISTRY_SERVICE == "dockerhub": remote_identifier = self.dockerhub_identifier elif REGISTRY_SERVICE == "awsecr": remote_identifier = self.awsecr_identifier else: raise ValueError try: run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier]) docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name) logger.info(" Success!") if self.is_state_exists: self.update( actions=[ ImageModel.md5.set(self.md5), ImageModel.last_update.set(self.last_update) ] ) else: self.save() return True except subprocess.CalledProcessError as e: logger.info(" Push failed!") logger.info(" {}".format(e)) return False except Exception as e: logger.info(" {}".format(e)) return False ImageModel.create_table(billing_mode="PAY_PER_REQUEST") def plan_image_to_build(): """ :rtype: typing.List[ImageModel] :return: """ logger.info("Scan code repo to scheduler docker build ...") image_list = list() for repo_folder in 
os.listdir(DIR_REPOS): dir_repo_root = os.path.join(DIR_REPOS, repo_folder) if not os.path.isdir(dir_repo_root): continue for tag_folder in os.listdir(dir_repo_root): dir_tag_root = os.path.join(dir_repo_root, tag_folder) image = ImageModel() image.dir_repo_root = dir_repo_root image.dir_tag_root = dir_tag_root
strip_comments
identifier_name
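The third build_all.py sample includes get_json_value, which resolves a dotted "$." path against a comment-stripped JSON config file. The sketch below shows just the path-resolution loop; comment stripping and file I/O are left out, and the sample document used in the asserts is invented for illustration.

import json

def get_json_value_from_text(text, json_path):
    """Resolve a dotted path such as "$.a.b" against a JSON document,
    mirroring the lookup loop in get_json_value (file handling omitted)."""
    if json_path.startswith("$."):
        json_path = json_path[2:]
    value = json.loads(text)
    for part in json_path.split("."):
        if isinstance(value, dict) and part in value:
            value = value[part]
        else:
            raise ValueError(f"'$.{json_path}' not found")
    return value

doc = '{"AWS": {"REGION": "us-east-1"}, "IMAGE_REBUILD_INTERVAL": 86400}'
assert get_json_value_from_text(doc, "$.AWS.REGION") == "us-east-1"
assert get_json_value_from_text(doc, "IMAGE_REBUILD_INTERVAL") == 86400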
build_all.py
: Iterable of symbols that start a line comment (default # or //). :return: The string with the comments removed. """ lines = string.splitlines() for k in range(len(lines)): for symbol in comment_symbols: lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol) return '\n'.join(lines) def read_json(file_path): """ Read dict data from json file :type file_path: str :rtype: dict """ return json.loads(strip_comments(read_text(file_path))) def get_json_value(file_path, json_path): """ Read specific field from JSON file. :type file_path: str :param file_path: the absolute path for a json file :type json_path: str :param json_path: json path notation. """ # find absolute path cwd = os.getcwd() if not os.path.isabs(file_path): file_path = os.path.abspath(os.path.join(cwd, file_path)) # fix json_path if json_path.startswith("$."): json_path = json_path.replace("$.", "", 1) with open(file_path, "rb") as f: data = json.loads(strip_comments(f.read().decode("utf-8"))) value = data for part in json_path.split("."): if part in value: value = value[part] else: raise ValueError("'$.{}' not found in {}".format(json_path, file_path)) return value def get_dockerfile_md5(dockerfile_path): """ Get md5 check sum of a dockerfile, comments, empty line, tailing space are ignored. :param dockerfile_path: the absolute path of the Dockerfile :rtype: str """ valid_lines = list() with open(dockerfile_path, "rb") as f: lines = f.read().decode("utf-8").split("\n") for line in lines: line = line.rstrip() # ignore comment line if line.startswith("#"): continue # ignore empty line if not bool(line): continue # trim tailing comment if "#" in line: line = line[:-(line[::-1].index("#") + 1)].rstrip() if line: valid_lines.append(line) else: valid_lines.append(line) md5 = hashlib.md5() md5.update("\n".join(valid_lines).encode("utf-8")) return md5.hexdigest() DIR_HERE = os.path.abspath(os.path.dirname(__file__)) DIR_PROJECT_ROOT = DIR_HERE DIR_CICD = DIR_HERE DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos") # config file path PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"] PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json") DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"] IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL") GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG) # --- Load Configs --- # detect runtime class Runtime: local = "local" circleci = "circleci" if os.environ.get("CIRCLECI"): runtime = Runtime.circleci else: runtime = Runtime.local # resolve config if runtime == Runtime.local: AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION") AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE") # set environment variable, allow pynamodb to detect credential os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE os.environ["AWS_DEFAULT_REGION"] = AWS_REGION try: DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD") except: DOCKER_HUB_PASSWORD = "" GIT_BRANCH = "" elif runtime == Runtime.circleci: AWS_REGION = os.environ["AWS_DEFAULT_REGION"] AWS_PROFILE = None DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"] GIT_BRANCH = os.environ["CIRCLE_BRANCH"] else: raise NotImplementedError logger = create_logger() docker_client = docker.from_env() # --- Image State DynamoDB backend class ImageModel(Model): class Meta: table_name = "docker-image-state" region = AWS_REGION identifier = 
UnicodeAttribute(hash_key=True) md5 = UnicodeAttribute() last_update = UnicodeAttribute() @property def last_update_datetime(self):
dockerhub_username = DOCKER_HUB_USERNAME # type: str dir_repo_root = None # type: str dir_tag_root = None # type: str is_state_exists = None # type: bool _repo_name = None # type: str @property def repo_name(self): if self._repo_name is None: self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip() return self._repo_name _tag_name = None # type: str @property def tag_name(self): if self._tag_name is None: self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip() return self._tag_name @property def dockerfile_path(self): return os.path.join(self.dir_tag_root, "Dockerfile") def has_dockerfile(self): return os.path.exists(self.dockerfile_path) _dockerfile_md5 = None # type: str @property def dockerfile_md5(self): if self._dockerfile_md5 is None: self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path) return self._dockerfile_md5 @property def local_identifier(self): return f"{self.repo_name}:{self.tag_name}" @property def dockerhub_identifier(self): return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}" @property def awsecr_identifier(self): raise NotImplementedError @property def smoke_test_script_path(self): return os.path.join(self.dir_tag_root, "smoke-test.sh") def run_docker_build(self): """ :rtype: bool :return: """ logger.info(f"Build docker image in context at {self.dir_tag_root} ...") try: run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root]) self.last_update = str(datetime.utcnow()) logger.info(" Build success!") return True except subprocess.CalledProcessError as e: logger.info(" Build failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_smoke_test(self): """ :rtype: bool :return: """ logger.info(f"Run smoke test script {self.smoke_test_script_path}...") try: run_and_log_command(["bash", self.smoke_test_script_path]) logger.info(" Test passed!") return True except subprocess.CalledProcessError as e: logger.info(" Test failed!") logger.info(" {}".format(e)) return False except Exception: return False def run_docker_push(self, docker_client): logger.info(f"Push docker image {self.identifier} ...") if REGISTRY_SERVICE == "dockerhub": remote_identifier = self.dockerhub_identifier elif REGISTRY_SERVICE == "awsecr": remote_identifier = self.awsecr_identifier else: raise ValueError try: run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier]) docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name) logger.info(" Success!") if self.is_state_exists: self.update( actions=[ ImageModel.md5.set(self.md5), ImageModel.last_update.set(self.last_update) ] ) else: self.save() return True except subprocess.CalledProcessError as e: logger.info(" Push failed!") logger.info(" {}".format(e)) return False except Exception as e: logger.info(" {}".format(e)) return False ImageModel.create_table(billing_mode="PAY_PER_REQUEST") def plan_image_to_build(): """ :rtype: typing.List[ImageModel] :return: """ logger.info("Scan code repo to scheduler docker build ...") image_list = list() for repo_folder in os.listdir(DIR_REPOS): dir_repo_root = os.path.join(DIR_REPOS, repo_folder) if not os.path.isdir(dir_repo_root): continue for tag_folder in os.listdir(dir_repo_root): dir_tag_root = os.path.join(dir_repo_root, tag_folder) image = ImageModel() image.dir_repo_root = dir_repo_root image.dir_tag_root = dir_tag_root if not image.has_dockerfile(): continue logger.info(f" Detected '{image.local_identifier}' image") try: _image = 
ImageModel.get(image.local_identifier)  # type: ImageModel
""" datetime type of ``last_update`` """ return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f")
identifier_body
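The fourth build_all.py sample repeats the runtime detection: when the CIRCLECI environment variable is set, the AWS region, Docker Hub password, and git branch come from environment variables; otherwise they come from local config files. A hedged sketch of that branch as a function; the return shape and placeholder values are mine, and locally the real script reads config.json and docker-hub-secret.json instead of returning blanks.

import os

def resolve_runtime_config(environ=os.environ):
    """Pick configuration sources the way build_all.py does: CI settings come
    from the environment, local settings would come from config files."""
    if environ.get("CIRCLECI"):
        return {
            "runtime": "circleci",
            "aws_region": environ["AWS_DEFAULT_REGION"],
            "docker_hub_password": environ["DOCKER_HUB_PASS"],
            "git_branch": environ["CIRCLE_BRANCH"],
        }
    return {
        "runtime": "local",
        # placeholders: the real script reads these from config.json and
        # docker-hub-secret.json rather than leaving them empty
        "aws_region": None,
        "docker_hub_password": "",
        "git_branch": "",
    }

print(resolve_runtime_config({"CIRCLECI": "true",
                              "AWS_DEFAULT_REGION": "us-east-1",
                              "DOCKER_HUB_PASS": "s3cret",
                              "CIRCLE_BRANCH": "main"}))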
admission_test.go
("", "") if err != nil { t.Fatalf("Unexpected error while creating temporary file: %v", err) } p := tempfile.Name() defer os.Remove(p) kubeconfig := ` clusters: - name: foo cluster: server: https://example.com users: - name: alice user: token: deadbeef contexts: - name: default context: cluster: foo user: alice current-context: default ` if _, err := tempfile.WriteString(kubeconfig); err != nil { t.Fatalf("Unexpected error while writing test kubeconfig file: %v", err) } tests := []struct { note string input string wantErr bool }{ {"no config", "", true}, {"bad json", `{"foo": `, true}, {"bad yaml", `{foo" `, true}, { "missing kubeconfig", `{"foo": {}}`, true, }, { "kubeconfig not found", `{ "kubeconfig": "/kube-federation-scheduling-policy-file-not-found-test" }`, true, }, { "bad retry backoff", fmt.Sprintf(` { "kubeconfig": %q, "retryBackoff": -1 } `, p), true, }, { "a valid config", fmt.Sprintf(` { "kubeconfig": %q } `, p), false, }, { "a valid config with retry backoff", fmt.Sprintf(` { "kubeconfig": %q, "retryBackoff": 200 } `, p), false, }, } for _, tc := range tests { var file io.Reader if tc.input == "" { file = nil } else { file = bytes.NewBufferString(tc.input) } _, err := newAdmissionController(file) if tc.wantErr && err == nil { t.Errorf("%v: Expected error", tc.note) } else if !tc.wantErr && err != nil { t.Errorf("%v: Unexpected error: %v", tc.note, err) } } } func TestAdmitQueryPayload(t *testing.T) { var body interface{} serve := func(w http.ResponseWriter, r *http.Request) { if err := json.NewDecoder(r.Body).Decode(&body); err != nil { t.Fatalf("Unexpected error reading admission payload: %v", err) } // No errors or annotations. w.Write([]byte(`{}`)) } controller, err := newControllerWithTestServer(serve, true) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } rs := makeReplicaSet() rs.Spec.MinReadySeconds = 100 attrs := makeAdmissionRecord(rs) err = controller.Admit(attrs) if err != nil { t.Fatalf("Unexpected error from admission controller: %v", err) } obj := body.(map[string]interface{}) metadata := obj["metadata"].(map[string]interface{}) spec := obj["spec"].(map[string]interface{}) name := metadata["name"].(string) minReadySeconds := spec["minReadySeconds"].(float64) expectedName := "myapp" if name != expectedName { t.Fatalf("Expected replicaset.metadata.name to be %v but got: %v", expectedName, name) } expectedMinReadySeconds := float64(100) if minReadySeconds != expectedMinReadySeconds { t.Fatalf("Expected replicaset.spec.minReadySeconds to be %v but got: %v", expectedMinReadySeconds, minReadySeconds) } } func TestAdmitFailInternal(t *testing.T) { serve := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) } controller, err := newControllerWithTestServer(serve, false) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } mockClient := &fake.Clientset{} mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("unknown error") }) controller.SetInternalKubeClientSet(mockClient) attrs := makeAdmissionRecord(makeReplicaSet()) err = controller.Admit(attrs) if err == nil { t.Fatalf("Expected admission controller to fail closed") } } func TestAdmitPolicyDoesNotExist(t *testing.T) { controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) }, false) if err != nil { t.Fatalf("Unexpected error while creating test admission 
controller/server: %v", err) } attrs := makeAdmissionRecord(makeReplicaSet()) err = controller.Admit(attrs) if err != nil { t.Fatalf("Expected admission controller to fail open but got error: %v", err) } } func TestAdmitFailClosed(t *testing.T) { tests := []struct { note string statusCode int body string }{ {"server error", 500, ""}, {"unmarshal error", 200, "{"}, {"undefined result", 404, ``}, {"policy errors", 200, `{"errors": ["conflicting replica-set-preferences"]}`}, } for _, tc := range tests { serve := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(tc.statusCode) if len(tc.body) > 0
} controller, err := newControllerWithTestServer(serve, true) if err != nil { t.Errorf("%v: Unexpected error while creating test admission controller/server: %v", tc.note, err) continue } obj := makeReplicaSet() attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{}) err = controller.Admit(attrs) if err == nil { t.Errorf("%v: Expected admission controller to fail closed", tc.note) } } } func TestAdmitRetries(t *testing.T) { var numQueries int controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(500) numQueries++ }, true) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } err = controller.Admit(makeAdmissionRecord(makeReplicaSet())) if err == nil { t.Fatalf("Expected admission controller to fail closed") } if numQueries <= 1 { t.Fatalf("Expected multiple queries/retries but got (numQueries): %v", numQueries) } } func TestAdmitSuccessWithAnnotationMerge(t *testing.T) { controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(` { "annotations": { "foo": "bar-2" } } `)) }, true) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } obj := makeReplicaSet() obj.Annotations = map[string]string{} obj.Annotations["foo"] = "bar" obj.Annotations["bar"] = "baz" attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{}) err = controller.Admit(attrs) if err != nil { t.Fatalf("Unexpected error from admission controller: %v", err) } annotations := attrs.GetObject().(*extensionsv1.ReplicaSet).Annotations expected := map[string]string{ "foo": "bar-2", "bar": "baz", } if !reflect.DeepEqual(annotations, expected) { t.Fatalf("Expected annotations to be %v but got: %v", expected, annotations) } } func newControllerWithTestServer(f func(w http.ResponseWriter, r *http.Request), policiesExist bool) (*admissionController, error) { server, err := newTestServer(f) if err != nil { return nil, err } kubeConfigFile, err := makeKubeConfigFile(server.URL, "/some/path/to/decision") if err != nil { return nil, err } defer os.Remove(kubeConfigFile) configFile, err := makeAdmissionControlConfigFile(kubeConfigFile) if err != nil { return nil, err } defer os.Remove(configFile) file, err := os.Open(configFile) if err != nil { return nil, err } controller, err := newAdmissionController(file) if err != nil { return nil, err } mockClient := &fake.Clientset{} var items []api.ConfigMap if policiesExist { items = append(items, api.Config
{ w.Write([]byte(tc.body)) }
conditional_block
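TestAdmitRetries in the Go sample above verifies two properties at once: the admission controller fails closed when the policy endpoint keeps returning 500, and it retries more than once before giving up. The same pattern, reduced to a self-contained Python sketch with a counting test server; every name here (handler, retry helper, retry count) is illustrative and not part of the Go test itself.

import threading
import urllib.error
import urllib.request
from http.server import BaseHTTPRequestHandler, HTTPServer

class FailingHandler(BaseHTTPRequestHandler):
    """Always answer 500 and count how many requests arrived."""
    calls = 0

    def do_POST(self):
        FailingHandler.calls += 1
        self.rfile.read(int(self.headers.get("Content-Length", 0)))
        self.send_response(500)
        self.end_headers()

    def log_message(self, fmt, *args):  # keep test output quiet
        pass

def admit_with_retries(url, retries=3):
    """Toy stand-in for the admission call: retry on failure, then fail closed."""
    for _ in range(retries):
        try:
            urllib.request.urlopen(urllib.request.Request(url, data=b"{}"))
            return True
        except urllib.error.HTTPError:
            continue
    return False

server = HTTPServer(("127.0.0.1", 0), FailingHandler)
threading.Thread(target=server.serve_forever, daemon=True).start()
try:
    ok = admit_with_retries(f"http://127.0.0.1:{server.server_port}/decision")
    assert not ok                      # the controller fails closed
    assert FailingHandler.calls > 1    # and it really retried first
finally:
    server.shutdown()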
admission_test.go
("", "") if err != nil { t.Fatalf("Unexpected error while creating temporary file: %v", err) } p := tempfile.Name() defer os.Remove(p) kubeconfig := ` clusters: - name: foo cluster: server: https://example.com users: - name: alice user: token: deadbeef contexts: - name: default context: cluster: foo user: alice current-context: default ` if _, err := tempfile.WriteString(kubeconfig); err != nil { t.Fatalf("Unexpected error while writing test kubeconfig file: %v", err) } tests := []struct { note string input string wantErr bool }{ {"no config", "", true}, {"bad json", `{"foo": `, true}, {"bad yaml", `{foo" `, true}, { "missing kubeconfig", `{"foo": {}}`, true, }, { "kubeconfig not found", `{ "kubeconfig": "/kube-federation-scheduling-policy-file-not-found-test" }`, true, }, { "bad retry backoff", fmt.Sprintf(` { "kubeconfig": %q, "retryBackoff": -1 } `, p), true, }, { "a valid config", fmt.Sprintf(` { "kubeconfig": %q } `, p), false, }, { "a valid config with retry backoff", fmt.Sprintf(` { "kubeconfig": %q, "retryBackoff": 200 } `, p), false, }, } for _, tc := range tests { var file io.Reader if tc.input == "" { file = nil } else { file = bytes.NewBufferString(tc.input) } _, err := newAdmissionController(file) if tc.wantErr && err == nil { t.Errorf("%v: Expected error", tc.note) } else if !tc.wantErr && err != nil { t.Errorf("%v: Unexpected error: %v", tc.note, err) } } } func TestAdmitQueryPayload(t *testing.T) { var body interface{} serve := func(w http.ResponseWriter, r *http.Request) { if err := json.NewDecoder(r.Body).Decode(&body); err != nil { t.Fatalf("Unexpected error reading admission payload: %v", err) } // No errors or annotations. w.Write([]byte(`{}`)) } controller, err := newControllerWithTestServer(serve, true) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } rs := makeReplicaSet() rs.Spec.MinReadySeconds = 100 attrs := makeAdmissionRecord(rs) err = controller.Admit(attrs) if err != nil { t.Fatalf("Unexpected error from admission controller: %v", err) } obj := body.(map[string]interface{}) metadata := obj["metadata"].(map[string]interface{}) spec := obj["spec"].(map[string]interface{}) name := metadata["name"].(string) minReadySeconds := spec["minReadySeconds"].(float64) expectedName := "myapp" if name != expectedName { t.Fatalf("Expected replicaset.metadata.name to be %v but got: %v", expectedName, name) } expectedMinReadySeconds := float64(100) if minReadySeconds != expectedMinReadySeconds { t.Fatalf("Expected replicaset.spec.minReadySeconds to be %v but got: %v", expectedMinReadySeconds, minReadySeconds) } } func TestAdmitFailInternal(t *testing.T) { serve := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) } controller, err := newControllerWithTestServer(serve, false) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } mockClient := &fake.Clientset{} mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) { return true, nil, fmt.Errorf("unknown error") }) controller.SetInternalKubeClientSet(mockClient) attrs := makeAdmissionRecord(makeReplicaSet()) err = controller.Admit(attrs) if err == nil { t.Fatalf("Expected admission controller to fail closed") } } func TestAdmitPolicyDoesNotExist(t *testing.T) { controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) }, false) if err != nil { t.Fatalf("Unexpected error while creating test admission 
controller/server: %v", err) } attrs := makeAdmissionRecord(makeReplicaSet()) err = controller.Admit(attrs) if err != nil { t.Fatalf("Expected admission controller to fail open but got error: %v", err) } } func TestAdmitFailClosed(t *testing.T) { tests := []struct { note string statusCode int body string }{ {"server error", 500, ""}, {"unmarshal error", 200, "{"}, {"undefined result", 404, ``}, {"policy errors", 200, `{"errors": ["conflicting replica-set-preferences"]}`}, } for _, tc := range tests { serve := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(tc.statusCode) if len(tc.body) > 0 { w.Write([]byte(tc.body)) } } controller, err := newControllerWithTestServer(serve, true) if err != nil { t.Errorf("%v: Unexpected error while creating test admission controller/server: %v", tc.note, err) continue } obj := makeReplicaSet() attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{}) err = controller.Admit(attrs) if err == nil { t.Errorf("%v: Expected admission controller to fail closed", tc.note) } } } func TestAdmitRetries(t *testing.T) { var numQueries int controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(500) numQueries++ }, true) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } err = controller.Admit(makeAdmissionRecord(makeReplicaSet())) if err == nil { t.Fatalf("Expected admission controller to fail closed") } if numQueries <= 1 { t.Fatalf("Expected multiple queries/retries but got (numQueries): %v", numQueries) } } func TestAdmitSuccessWithAnnotationMerge(t *testing.T) { controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(` { "annotations": { "foo": "bar-2" } } `)) }, true) if err != nil { t.Fatalf("Unexpected error while creating test admission controller/server: %v", err) } obj := makeReplicaSet() obj.Annotations = map[string]string{} obj.Annotations["foo"] = "bar" obj.Annotations["bar"] = "baz" attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{}) err = controller.Admit(attrs) if err != nil { t.Fatalf("Unexpected error from admission controller: %v", err) } annotations := attrs.GetObject().(*extensionsv1.ReplicaSet).Annotations expected := map[string]string{ "foo": "bar-2", "bar": "baz", } if !reflect.DeepEqual(annotations, expected) { t.Fatalf("Expected annotations to be %v but got: %v", expected, annotations) } } func newControllerWithTestServer(f func(w http.ResponseWriter, r *http.Request), policiesExist bool) (*admissionController, error) { server, err := newTestServer(f) if err != nil { return nil, err } kubeConfigFile, err := makeKubeConfigFile(server.URL, "/some/path/to/decision") if err != nil { return nil, err
configFile, err := makeAdmissionControlConfigFile(kubeConfigFile) if err != nil { return nil, err } defer os.Remove(configFile) file, err := os.Open(configFile) if err != nil { return nil, err } controller, err := newAdmissionController(file) if err != nil { return nil, err } mockClient := &fake.Clientset{} var items []api.ConfigMap if policiesExist { items = append(items, api.ConfigMap{})
} defer os.Remove(kubeConfigFile)
random_line_split
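TestAdmitSuccessWithAnnotationMerge expects policy-provided annotations to win on conflicts while untouched keys survive. That merge rule is small enough to state directly; the helper below is a sketch of the behaviour the test asserts, not the controller's actual implementation.

def merge_annotations(existing, from_policy):
    # Mirrors the expectation in TestAdmitSuccessWithAnnotationMerge:
    # policy-provided annotations win on conflicts, all other keys are kept.
    merged = dict(existing or {})
    merged.update(from_policy or {})
    return merged

assert merge_annotations({"foo": "bar", "bar": "baz"}, {"foo": "bar-2"}) == {
    "foo": "bar-2",
    "bar": "baz",
}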