file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes: identifier_name, identifier_body, random_line_split, conditional_block)
---|---|---|---|---|
run.go | // Licensed to Apache Software Foundation (ASF) under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Apache Software Foundation (ASF) licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Package run implements a lifecycle framework to control modules.
package run
import (
"fmt"
"os"
"path"
"sync"
"github.com/oklog/run"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"go.uber.org/multierr"
"github.com/apache/skywalking-banyandb/pkg/config"
"github.com/apache/skywalking-banyandb/pkg/logger"
)
// FlagSet holds a pflag.FlagSet as well as an exported Name variable for
// allowing improved help usage information.
type FlagSet struct {
*pflag.FlagSet
Name string
}
// NewFlagSet returns a new FlagSet for usage in Config objects.
func NewFlagSet(name string) *FlagSet {
return &FlagSet{
FlagSet: pflag.NewFlagSet(name, pflag.ContinueOnError),
Name: name,
}
}
// Unit is the default interface an object needs to implement for it to be able
// to register with a Group.
// Name should return a short but good identifier of the Unit.
type Unit interface {
Name() string
}
// Config interface should be implemented by Group Unit objects that manage
// their own configuration through the use of flags.
// If a Unit's Validate returns an error it will stop the Group immediately.
type Config interface {
// Unit for Group registration and identification
Unit
// FlagSet returns an object's FlagSet
FlagSet() *FlagSet
// Validate checks an object's stored values
Validate() error
}
// PreRunner interface should be implemented by Group Unit objects that need
// a pre run stage before starting the Group Services.
// If a Unit's PreRun returns an error it will stop the Group immediately.
type PreRunner interface {
// Unit for Group registration and identification
Unit
PreRun() error
}
// NewPreRunner takes a name and a standalone pre runner compatible function
// and turns them into a Group compatible PreRunner, ready for registration.
func NewPreRunner(name string, fn func() error) PreRunner {
return preRunner{name: name, fn: fn}
}
type preRunner struct {
fn func() error
name string
}
func (p preRunner) Name() string {
return p.name
}
func (p preRunner) PreRun() error {
return p.fn()
}
// StopNotify sends the stopped event to the running system.
type StopNotify <-chan struct{}
// Service interface should be implemented by Group Unit objects that need
// to run a blocking service until an error occurs or a shutdown request is
// made.
// The Serve method must be blocking and return an error on unexpected shutdown.
// Recoverable errors need to be handled inside the service itself.
// GracefulStop must gracefully stop the service and make the Serve call return.
//
// Since Service is managed by Group, it is considered a design flaw to call any
// of the Service methods directly in application code.
type Service interface {
// Unit for Group registration and identification
Unit
// Serve starts the GroupService and blocks.
Serve() StopNotify
// GracefulStop shuts down and cleans up the GroupService.
GracefulStop()
}
// Group builds on https://github.com/oklog/run to provide a deterministic way
// to manage service lifecycles. It allows for easy composition of elegant
// monoliths as well as adding signal handlers, metrics services, etc.
type Group struct {
f *FlagSet
readyCh chan struct{}
log *logger.Logger
name string
r run.Group
c []Config
p []PreRunner
s []Service
showRunGroup bool
configured bool
}
// NewGroup returns a Group with the given name.
func NewGroup(name string) Group {
return Group{
name: name,
readyCh: make(chan struct{}),
}
}
// Name returns the name of the group.
func (g Group) Name() string {
return g.name
}
// Register will inspect the provided objects implementing the Unit interface to
// see if it needs to register the objects for any of the Group bootstrap
// phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored
// by Group.
// The returned array of booleans is of the same size as the amount of provided
// Units, signaling for each provided Unit if it successfully registered with
// Group for at least one of the bootstrap phases or if it was ignored.
func (g *Group) | (units ...Unit) []bool {
g.log = logger.GetLogger(g.name)
hasRegistered := make([]bool, len(units))
for idx := range units {
if !g.configured {
// if RunConfig has been called we can no longer register Config
// phases of Units
if c, ok := units[idx].(Config); ok {
g.c = append(g.c, c)
hasRegistered[idx] = true
}
}
if p, ok := units[idx].(PreRunner); ok {
g.p = append(g.p, p)
hasRegistered[idx] = true
}
if s, ok := units[idx].(Service); ok {
g.s = append(g.s, s)
hasRegistered[idx] = true
}
}
return hasRegistered
}
// RegisterFlags returns a FlagSet that contains the flags of all registered modules.
func (g *Group) RegisterFlags() *FlagSet {
// run configuration stage
g.f = NewFlagSet(g.name)
g.f.SortFlags = false // keep order of flag registration
g.f.Usage = func() {
fmt.Printf("Flags:\n")
g.f.PrintDefaults()
}
gFS := NewFlagSet("Common Service options")
gFS.SortFlags = false
gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`)
gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units")
g.f.AddFlagSet(gFS.FlagSet)
// register flags from attached Config objects
fs := make([]*FlagSet, len(g.c))
for idx := range g.c {
// a Namer might have been deregistered
if g.c[idx] == nil {
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags")
fs[idx] = g.c[idx].FlagSet()
if fs[idx] == nil {
// no FlagSet returned
g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset")
continue
}
fs[idx].VisitAll(func(f *pflag.Flag) {
if g.f.Lookup(f.Name) != nil {
// log duplicate flag
g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag")
return
}
g.f.AddFlag(f)
})
}
return g.f
}
// RunConfig runs the Config phase of all registered Config aware Units.
// Only use this function if needing to add additional wiring between config
// and (pre)run phases and a separate PreRunner phase is not an option.
// In most cases it is best to use the Run method directly as it will run the
// Config phase prior to executing the PreRunner and Service phases.
// If an error is returned the application must shut down as it is considered
// fatal.
func (g *Group) RunConfig() (interrupted bool, err error) {
g.log = logger.GetLogger(g.name)
g.configured = true
if g.name == "" {
// use the binary name if custom name has not been provided
g.name = path.Base(os.Args[0])
}
defer func() {
if err != nil {
g.log.Error().Err(err).Msg("unexpected exit")
}
}()
// Load config from env and file
if err = config.Load(g.f.Name, g.f.FlagSet); err != nil {
return false, errors.Wrapf(err, "%s fails to load config", g.f.Name)
}
// bail early on help or version requests
switch {
case g.showRunGroup:
fmt.Println(g.ListUnits())
return true, nil
}
// Validate Config inputs
for idx := range g.c {
// a Config might have been deregistered during Run
if g.c[idx] == nil {
g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate")
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config")
if vErr := g.c[idx].Validate(); vErr != nil {
err = multierr.Append(err, vErr)
}
}
// exit on at least one Validate error
if err != nil {
return false, err
}
// log binary name and version
g.log.Info().Msg("started")
return false, nil
}
// Run will execute all phases of all registered Units and block until an error
// occurs.
// If RunConfig has been called prior to Run, the Group's Config phase will be
// skipped and Run continues with the PreRunner and Service phases.
//
// The following phases are executed in the following sequence:
//
// Config phase (serially, in order of Unit registration)
// - FlagSet() Get & register all FlagSets from Config Units.
// - Flag Parsing Using the provided args (os.Args if empty)
// - Validate() Validate Config Units. Exit on first error.
//
// PreRunner phase (serially, in order of Unit registration)
// - PreRun() Execute PreRunner Units. Exit on first error.
//
// Service phase (concurrently)
// - Serve() Execute all Service Units in separate Go routines.
// - Wait Block until one of the Serve() methods returns
// - GracefulStop() Call interrupt handlers of all Service Units.
//
// Run will return with the originating error on:
// - first Config.Validate() returning an error
// - first PreRunner.PreRun() returning an error
// - first Service.Serve() returning (error or nil)
func (g *Group) Run() (err error) {
// run config registration and flag parsing stages
if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil {
return errRun
}
defer func() {
if err != nil {
g.log.Fatal().Err(err).Stack().Msg("unexpected exit")
}
}()
// execute pre run stage and exit on error
for idx := range g.p {
// a PreRunner might have been deregistered during Run
if g.p[idx] == nil {
continue
}
g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run")
if err := g.p[idx].PreRun(); err != nil {
return err
}
}
swg := &sync.WaitGroup{}
swg.Add(len(g.s))
go func() {
swg.Wait()
close(g.readyCh)
}()
// feed our registered services to our internal run.Group
for idx := range g.s {
// a Service might have been deregistered during Run
s := g.s[idx]
if s == nil {
continue
}
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve")
g.r.Add(func() error {
notify := s.Serve()
swg.Done()
<-notify
return nil
}, func(_ error) {
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop")
s.GracefulStop()
})
}
// start registered services and block
return g.r.Run()
}
// ListUnits returns a list of all Group phases and the Units registered to each
// of them.
func (g Group) ListUnits() string {
var (
s string
t = "cli"
)
if len(g.c) > 0 {
s += "\n- config: "
for _, u := range g.c {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.p) > 0 {
s += "\n- prerun: "
for _, u := range g.p {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.s) > 0 {
s += "\n- serve : "
for _, u := range g.s {
if u != nil {
t = "svc"
s += u.Name() + " "
}
}
}
return fmt.Sprintf("Group: %s [%s]%s", g.name, t, s)
}
// WaitTillReady blocks the goroutine till all modules are ready.
func (g *Group) WaitTillReady() {
<-g.readyCh
}
| Register | identifier_name |
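
The run.go rows above and below define a small lifecycle framework (Unit, Config, PreRunner, Service, Group). As a minimal, hypothetical usage sketch of that API — not part of the dataset; the import path and the logger behaviour are assumptions:

```go
// Hypothetical usage sketch of the run package shown above; the import path
// and zero-setup logger behaviour are assumptions, not taken from the rows.
package main

import (
	"fmt"

	"github.com/apache/skywalking-banyandb/pkg/run"
)

// blocker is a toy Service: Serve hands back a channel that GracefulStop
// closes, which lets Group.Run unblock and shut down cleanly.
type blocker struct {
	stop chan struct{}
}

func (b *blocker) Name() string { return "blocker" }

func (b *blocker) Serve() run.StopNotify {
	b.stop = make(chan struct{})
	// A real service would start its blocking work in a goroutine here.
	return b.stop
}

func (b *blocker) GracefulStop() { close(b.stop) }

func main() {
	g := run.NewGroup("demo")
	// Register a PreRunner and a Service; Config Units would be added the same way.
	g.Register(
		run.NewPreRunner("init", func() error { return nil }),
		&blocker{},
	)
	// RegisterFlags must run before Run so the config phase has a FlagSet to load.
	g.RegisterFlags()
	if err := g.Run(); err != nil {
		fmt.Println("exit:", err)
	}
}
```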
run.go | // Licensed to Apache Software Foundation (ASF) under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Apache Software Foundation (ASF) licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Package run implements a lifecycle framework to control modules.
package run
import (
"fmt"
"os"
"path"
"sync"
"github.com/oklog/run"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"go.uber.org/multierr"
"github.com/apache/skywalking-banyandb/pkg/config"
"github.com/apache/skywalking-banyandb/pkg/logger"
)
// FlagSet holds a pflag.FlagSet as well as an exported Name variable for
// allowing improved help usage information.
type FlagSet struct {
*pflag.FlagSet
Name string
}
// NewFlagSet returns a new FlagSet for usage in Config objects.
func NewFlagSet(name string) *FlagSet {
return &FlagSet{
FlagSet: pflag.NewFlagSet(name, pflag.ContinueOnError),
Name: name,
}
}
// Unit is the default interface an object needs to implement for it to be able
// to register with a Group.
// Name should return a short but good identifier of the Unit.
type Unit interface {
Name() string
}
// Config interface should be implemented by Group Unit objects that manage
// their own configuration through the use of flags.
// If a Unit's Validate returns an error it will stop the Group immediately.
type Config interface {
// Unit for Group registration and identification
Unit
// FlagSet returns an object's FlagSet
FlagSet() *FlagSet
// Validate checks an object's stored values
Validate() error
}
// PreRunner interface should be implemented by Group Unit objects that need
// a pre run stage before starting the Group Services.
// If a Unit's PreRun returns an error it will stop the Group immediately.
type PreRunner interface {
// Unit for Group registration and identification
Unit
PreRun() error
}
// NewPreRunner takes a name and a standalone pre runner compatible function
// and turns them into a Group compatible PreRunner, ready for registration.
func NewPreRunner(name string, fn func() error) PreRunner {
return preRunner{name: name, fn: fn}
}
type preRunner struct {
fn func() error
name string
}
func (p preRunner) Name() string {
return p.name
}
func (p preRunner) PreRun() error {
return p.fn()
}
// StopNotify sends the stopped event to the running system.
type StopNotify <-chan struct{}
// Service interface should be implemented by Group Unit objects that need
// to run a blocking service until an error occurs or a shutdown request is
// made.
// The Serve method must be blocking and return an error on unexpected shutdown.
// Recoverable errors need to be handled inside the service itself.
// GracefulStop must gracefully stop the service and make the Serve call return.
//
// Since Service is managed by Group, it is considered a design flaw to call any
// of the Service methods directly in application code.
type Service interface {
// Unit for Group registration and identification
Unit
// Serve starts the GroupService and blocks.
Serve() StopNotify
// GracefulStop shuts down and cleans up the GroupService.
GracefulStop()
}
// Group builds on https://github.com/oklog/run to provide a deterministic way
// to manage service lifecycles. It allows for easy composition of elegant
// monoliths as well as adding signal handlers, metrics services, etc.
type Group struct {
f *FlagSet
readyCh chan struct{}
log *logger.Logger
name string
r run.Group
c []Config
p []PreRunner
s []Service
showRunGroup bool
configured bool
}
// NewGroup returns a Group with the given name.
func NewGroup(name string) Group {
return Group{
name: name,
readyCh: make(chan struct{}),
}
}
// Name returns the name of the group.
func (g Group) Name() string {
return g.name
}
// Register will inspect the provided objects implementing the Unit interface to
// see if it needs to register the objects for any of the Group bootstrap
// phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored
// by Group.
// The returned array of booleans is of the same size as the amount of provided
// Units, signaling for each provided Unit if it successfully registered with
// Group for at least one of the bootstrap phases or if it was ignored.
func (g *Group) Register(units ...Unit) []bool {
g.log = logger.GetLogger(g.name)
hasRegistered := make([]bool, len(units))
for idx := range units {
if !g.configured {
// if RunConfig has been called we can no longer register Config
// phases of Units
if c, ok := units[idx].(Config); ok {
g.c = append(g.c, c)
hasRegistered[idx] = true
}
}
if p, ok := units[idx].(PreRunner); ok {
g.p = append(g.p, p)
hasRegistered[idx] = true
}
if s, ok := units[idx].(Service); ok {
g.s = append(g.s, s)
hasRegistered[idx] = true
}
}
return hasRegistered
}
// RegisterFlags returns a FlagSet that contains the flags of all registered modules.
func (g *Group) RegisterFlags() *FlagSet {
// run configuration stage
g.f = NewFlagSet(g.name)
g.f.SortFlags = false // keep order of flag registration
g.f.Usage = func() {
fmt.Printf("Flags:\n")
g.f.PrintDefaults()
}
gFS := NewFlagSet("Common Service options")
gFS.SortFlags = false
gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`)
gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units")
g.f.AddFlagSet(gFS.FlagSet)
// register flags from attached Config objects
fs := make([]*FlagSet, len(g.c))
for idx := range g.c {
// a Namer might have been deregistered
if g.c[idx] == nil {
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags")
fs[idx] = g.c[idx].FlagSet()
if fs[idx] == nil {
// no FlagSet returned
g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset")
continue
}
fs[idx].VisitAll(func(f *pflag.Flag) {
if g.f.Lookup(f.Name) != nil {
// log duplicate flag
g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag")
return
}
g.f.AddFlag(f)
})
}
return g.f
}
// RunConfig runs the Config phase of all registered Config aware Units.
// Only use this function if needing to add additional wiring between config
// and (pre)run phases and a separate PreRunner phase is not an option.
// In most cases it is best to use the Run method directly as it will run the
// Config phase prior to executing the PreRunner and Service phases.
// If an error is returned the application must shut down as it is considered
// fatal.
func (g *Group) RunConfig() (interrupted bool, err error) {
g.log = logger.GetLogger(g.name)
g.configured = true
if g.name == "" {
// use the binary name if custom name has not been provided
g.name = path.Base(os.Args[0])
}
defer func() {
if err != nil {
g.log.Error().Err(err).Msg("unexpected exit")
}
}()
// Load config from env and file
if err = config.Load(g.f.Name, g.f.FlagSet); err != nil {
return false, errors.Wrapf(err, "%s fails to load config", g.f.Name)
}
// bail early on help or version requests
switch {
case g.showRunGroup:
fmt.Println(g.ListUnits())
return true, nil
}
// Validate Config inputs
for idx := range g.c {
// a Config might have been deregistered during Run
if g.c[idx] == nil {
g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate")
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config")
if vErr := g.c[idx].Validate(); vErr != nil {
err = multierr.Append(err, vErr)
}
}
// exit on at least one Validate error
if err != nil {
return false, err
}
// log binary name and version
g.log.Info().Msg("started")
return false, nil
}
// Run will execute all phases of all registered Units and block until an error
// occurs.
// If RunConfig has been called prior to Run, the Group's Config phase will be
// skipped and Run continues with the PreRunner and Service phases.
//
// The following phases are executed in the following sequence:
//
// Config phase (serially, in order of Unit registration)
// - FlagSet() Get & register all FlagSets from Config Units.
// - Flag Parsing Using the provided args (os.Args if empty)
// - Validate() Validate Config Units. Exit on first error.
//
// PreRunner phase (serially, in order of Unit registration)
// - PreRun() Execute PreRunner Units. Exit on first error.
//
// Service phase (concurrently)
// - Serve() Execute all Service Units in separate Go routines.
// - Wait Block until one of the Serve() methods returns
// - GracefulStop() Call interrupt handlers of all Service Units.
//
// Run will return with the originating error on:
// - first Config.Validate() returning an error
// - first PreRunner.PreRun() returning an error
// - first Service.Serve() returning (error or nil)
func (g *Group) Run() (err error) {
// run config registration and flag parsing stages
if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil {
return errRun
}
defer func() {
if err != nil {
g.log.Fatal().Err(err).Stack().Msg("unexpected exit")
}
}()
// execute pre run stage and exit on error
for idx := range g.p {
// a PreRunner might have been deregistered during Run
if g.p[idx] == nil {
continue
}
g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run")
if err := g.p[idx].PreRun(); err != nil {
return err
}
}
swg := &sync.WaitGroup{}
swg.Add(len(g.s))
go func() {
swg.Wait()
close(g.readyCh)
}()
// feed our registered services to our internal run.Group
for idx := range g.s {
// a Service might have been deregistered during Run
s := g.s[idx]
if s == nil {
continue
}
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve")
g.r.Add(func() error {
notify := s.Serve()
swg.Done()
<-notify
return nil
}, func(_ error) {
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop")
s.GracefulStop()
})
}
// start registered services and block
return g.r.Run()
}
// ListUnits returns a list of all Group phases and the Units registered to each
// of them.
func (g Group) ListUnits() string |
// WaitTillReady blocks the goroutine till all modules are ready.
func (g *Group) WaitTillReady() {
<-g.readyCh
}
| {
var (
s string
t = "cli"
)
if len(g.c) > 0 {
s += "\n- config: "
for _, u := range g.c {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.p) > 0 {
s += "\n- prerun: "
for _, u := range g.p {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.s) > 0 {
s += "\n- serve : "
for _, u := range g.s {
if u != nil {
t = "svc"
s += u.Name() + " "
}
}
}
return fmt.Sprintf("Group: %s [%s]%s", g.name, t, s)
} | identifier_body |
run.go | // Licensed to Apache Software Foundation (ASF) under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Apache Software Foundation (ASF) licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Package run implements a lifecycle framework to control modules.
package run
import (
"fmt"
"os"
"path"
"sync"
"github.com/oklog/run"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"go.uber.org/multierr"
"github.com/apache/skywalking-banyandb/pkg/config"
"github.com/apache/skywalking-banyandb/pkg/logger"
)
// FlagSet holds a pflag.FlagSet as well as an exported Name variable for
// allowing improved help usage information.
type FlagSet struct {
*pflag.FlagSet
Name string
}
// NewFlagSet returns a new FlagSet for usage in Config objects.
func NewFlagSet(name string) *FlagSet {
return &FlagSet{
FlagSet: pflag.NewFlagSet(name, pflag.ContinueOnError),
Name: name,
}
}
// Unit is the default interface an object needs to implement for it to be able
// to register with a Group.
// Name should return a short but good identifier of the Unit.
type Unit interface {
Name() string
}
// Config interface should be implemented by Group Unit objects that manage
// their own configuration through the use of flags.
// If a Unit's Validate returns an error it will stop the Group immediately.
type Config interface {
// Unit for Group registration and identification
Unit
// FlagSet returns an object's FlagSet
FlagSet() *FlagSet
// Validate checks an object's stored values
Validate() error
}
// PreRunner interface should be implemented by Group Unit objects that need
// a pre run stage before starting the Group Services.
// If a Unit's PreRun returns an error it will stop the Group immediately.
type PreRunner interface {
// Unit for Group registration and identification
Unit
PreRun() error
}
// NewPreRunner takes a name and a standalone pre runner compatible function
// and turns them into a Group compatible PreRunner, ready for registration.
func NewPreRunner(name string, fn func() error) PreRunner {
return preRunner{name: name, fn: fn}
}
type preRunner struct {
fn func() error
name string
}
func (p preRunner) Name() string {
return p.name
}
func (p preRunner) PreRun() error {
return p.fn()
}
// StopNotify sends the stopped event to the running system.
type StopNotify <-chan struct{}
// Service interface should be implemented by Group Unit objects that need
// to run a blocking service until an error occurs or a shutdown request is
// made.
// The Serve method must be blocking and return an error on unexpected shutdown.
// Recoverable errors need to be handled inside the service itself.
// GracefulStop must gracefully stop the service and make the Serve call return.
//
// Since Service is managed by Group, it is considered a design flaw to call any
// of the Service methods directly in application code.
type Service interface {
// Unit for Group registration and identification
Unit
// Serve starts the GroupService and blocks.
Serve() StopNotify
// GracefulStop shuts down and cleans up the GroupService.
GracefulStop()
}
// Group builds on https://github.com/oklog/run to provide a deterministic way
// to manage service lifecycles. It allows for easy composition of elegant
// monoliths as well as adding signal handlers, metrics services, etc.
type Group struct {
f *FlagSet
readyCh chan struct{}
log *logger.Logger
name string
r run.Group
c []Config
p []PreRunner
s []Service
showRunGroup bool
configured bool
}
// NewGroup returns a Group with the given name.
func NewGroup(name string) Group {
return Group{
name: name,
readyCh: make(chan struct{}),
}
}
// Name returns the name of the group.
func (g Group) Name() string {
return g.name
}
// Register will inspect the provided objects implementing the Unit interface to
// see if it needs to register the objects for any of the Group bootstrap
// phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored
// by Group.
// The returned array of booleans is of the same size as the amount of provided
// Units, signaling for each provided Unit if it successfully registered with
// Group for at least one of the bootstrap phases or if it was ignored.
func (g *Group) Register(units ...Unit) []bool {
g.log = logger.GetLogger(g.name)
hasRegistered := make([]bool, len(units))
for idx := range units {
if !g.configured {
// if RunConfig has been called we can no longer register Config
// phases of Units
if c, ok := units[idx].(Config); ok {
g.c = append(g.c, c)
hasRegistered[idx] = true
}
}
if p, ok := units[idx].(PreRunner); ok {
g.p = append(g.p, p)
hasRegistered[idx] = true
}
if s, ok := units[idx].(Service); ok {
g.s = append(g.s, s)
hasRegistered[idx] = true
}
}
return hasRegistered
}
// RegisterFlags returns a FlagSet that contains the flags of all registered modules.
func (g *Group) RegisterFlags() *FlagSet {
// run configuration stage
g.f = NewFlagSet(g.name)
g.f.SortFlags = false // keep order of flag registration
g.f.Usage = func() {
fmt.Printf("Flags:\n")
g.f.PrintDefaults()
}
gFS := NewFlagSet("Common Service options")
gFS.SortFlags = false
gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`)
gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units")
g.f.AddFlagSet(gFS.FlagSet)
// register flags from attached Config objects
fs := make([]*FlagSet, len(g.c))
for idx := range g.c {
// a Namer might have been deregistered
if g.c[idx] == nil {
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags")
fs[idx] = g.c[idx].FlagSet()
if fs[idx] == nil {
// no FlagSet returned
g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset")
continue
}
fs[idx].VisitAll(func(f *pflag.Flag) {
if g.f.Lookup(f.Name) != nil {
// log duplicate flag
g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag")
return
}
g.f.AddFlag(f)
})
}
return g.f
}
// RunConfig runs the Config phase of all registered Config aware Units.
// Only use this function if needing to add additional wiring between config
// and (pre)run phases and a separate PreRunner phase is not an option.
// In most cases it is best to use the Run method directly as it will run the
// Config phase prior to executing the PreRunner and Service phases.
// If an error is returned the application must shut down as it is considered
// fatal.
func (g *Group) RunConfig() (interrupted bool, err error) {
g.log = logger.GetLogger(g.name)
g.configured = true
if g.name == "" {
// use the binary name if custom name has not been provided
g.name = path.Base(os.Args[0])
}
defer func() {
if err != nil {
g.log.Error().Err(err).Msg("unexpected exit")
}
}()
// Load config from env and file
if err = config.Load(g.f.Name, g.f.FlagSet); err != nil {
return false, errors.Wrapf(err, "%s fails to load config", g.f.Name)
}
// bail early on help or version requests
switch { | fmt.Println(g.ListUnits())
return true, nil
}
// Validate Config inputs
for idx := range g.c {
// a Config might have been deregistered during Run
if g.c[idx] == nil {
g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate")
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config")
if vErr := g.c[idx].Validate(); vErr != nil {
err = multierr.Append(err, vErr)
}
}
// exit on at least one Validate error
if err != nil {
return false, err
}
// log binary name and version
g.log.Info().Msg("started")
return false, nil
}
// Run will execute all phases of all registered Units and block until an error
// occurs.
// If RunConfig has been called prior to Run, the Group's Config phase will be
// skipped and Run continues with the PreRunner and Service phases.
//
// The following phases are executed in the following sequence:
//
// Config phase (serially, in order of Unit registration)
// - FlagSet() Get & register all FlagSets from Config Units.
// - Flag Parsing Using the provided args (os.Args if empty)
// - Validate() Validate Config Units. Exit on first error.
//
// PreRunner phase (serially, in order of Unit registration)
// - PreRun() Execute PreRunner Units. Exit on first error.
//
// Service phase (concurrently)
// - Serve() Execute all Service Units in separate Go routines.
// - Wait Block until one of the Serve() methods returns
// - GracefulStop() Call interrupt handlers of all Service Units.
//
// Run will return with the originating error on:
// - first Config.Validate() returning an error
// - first PreRunner.PreRun() returning an error
// - first Service.Serve() returning (error or nil)
func (g *Group) Run() (err error) {
// run config registration and flag parsing stages
if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil {
return errRun
}
defer func() {
if err != nil {
g.log.Fatal().Err(err).Stack().Msg("unexpected exit")
}
}()
// execute pre run stage and exit on error
for idx := range g.p {
// a PreRunner might have been deregistered during Run
if g.p[idx] == nil {
continue
}
g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run")
if err := g.p[idx].PreRun(); err != nil {
return err
}
}
swg := &sync.WaitGroup{}
swg.Add(len(g.s))
go func() {
swg.Wait()
close(g.readyCh)
}()
// feed our registered services to our internal run.Group
for idx := range g.s {
// a Service might have been deregistered during Run
s := g.s[idx]
if s == nil {
continue
}
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve")
g.r.Add(func() error {
notify := s.Serve()
swg.Done()
<-notify
return nil
}, func(_ error) {
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop")
s.GracefulStop()
})
}
// start registered services and block
return g.r.Run()
}
// ListUnits returns a list of all Group phases and the Units registered to each
// of them.
func (g Group) ListUnits() string {
var (
s string
t = "cli"
)
if len(g.c) > 0 {
s += "\n- config: "
for _, u := range g.c {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.p) > 0 {
s += "\n- prerun: "
for _, u := range g.p {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.s) > 0 {
s += "\n- serve : "
for _, u := range g.s {
if u != nil {
t = "svc"
s += u.Name() + " "
}
}
}
return fmt.Sprintf("Group: %s [%s]%s", g.name, t, s)
}
// WaitTillReady blocks the goroutine till all modules are ready.
func (g *Group) WaitTillReady() {
<-g.readyCh
} | case g.showRunGroup: | random_line_split |
run.go | // Licensed to Apache Software Foundation (ASF) under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Apache Software Foundation (ASF) licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Package run implements a lifecycle framework to control modules.
package run
import (
"fmt"
"os"
"path"
"sync"
"github.com/oklog/run"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"go.uber.org/multierr"
"github.com/apache/skywalking-banyandb/pkg/config"
"github.com/apache/skywalking-banyandb/pkg/logger"
)
// FlagSet holds a pflag.FlagSet as well as an exported Name variable for
// allowing improved help usage information.
type FlagSet struct {
*pflag.FlagSet
Name string
}
// NewFlagSet returns a new FlagSet for usage in Config objects.
func NewFlagSet(name string) *FlagSet {
return &FlagSet{
FlagSet: pflag.NewFlagSet(name, pflag.ContinueOnError),
Name: name,
}
}
// Unit is the default interface an object needs to implement for it to be able
// to register with a Group.
// Name should return a short but good identifier of the Unit.
type Unit interface {
Name() string
}
// Config interface should be implemented by Group Unit objects that manage
// their own configuration through the use of flags.
// If a Unit's Validate returns an error it will stop the Group immediately.
type Config interface {
// Unit for Group registration and identification
Unit
// FlagSet returns an object's FlagSet
FlagSet() *FlagSet
// Validate checks an object's stored values
Validate() error
}
// PreRunner interface should be implemented by Group Unit objects that need
// a pre run stage before starting the Group Services.
// If a Unit's PreRun returns an error it will stop the Group immediately.
type PreRunner interface {
// Unit for Group registration and identification
Unit
PreRun() error
}
// NewPreRunner takes a name and a standalone pre runner compatible function
// and turns them into a Group compatible PreRunner, ready for registration.
func NewPreRunner(name string, fn func() error) PreRunner {
return preRunner{name: name, fn: fn}
}
type preRunner struct {
fn func() error
name string
}
func (p preRunner) Name() string {
return p.name
}
func (p preRunner) PreRun() error {
return p.fn()
}
// StopNotify sends the stopped event to the running system.
type StopNotify <-chan struct{}
// Service interface should be implemented by Group Unit objects that need
// to run a blocking service until an error occurs or a shutdown request is
// made.
// The Serve method must be blocking and return an error on unexpected shutdown.
// Recoverable errors need to be handled inside the service itself.
// GracefulStop must gracefully stop the service and make the Serve call return.
//
// Since Service is managed by Group, it is considered a design flaw to call any
// of the Service methods directly in application code.
type Service interface {
// Unit for Group registration and identification
Unit
// Serve starts the GroupService and blocks.
Serve() StopNotify
// GracefulStop shuts down and cleans up the GroupService.
GracefulStop()
}
// Group builds on https://github.com/oklog/run to provide a deterministic way
// to manage service lifecycles. It allows for easy composition of elegant
// monoliths as well as adding signal handlers, metrics services, etc.
type Group struct {
f *FlagSet
readyCh chan struct{}
log *logger.Logger
name string
r run.Group
c []Config
p []PreRunner
s []Service
showRunGroup bool
configured bool
}
// NewGroup returns a Group with the given name.
func NewGroup(name string) Group {
return Group{
name: name,
readyCh: make(chan struct{}),
}
}
// Name returns the name of the group.
func (g Group) Name() string {
return g.name
}
// Register will inspect the provided objects implementing the Unit interface to
// see if it needs to register the objects for any of the Group bootstrap
// phases. If a Unit doesn't satisfy any of the bootstrap phases it is ignored
// by Group.
// The returned array of booleans is of the same size as the amount of provided
// Units, signaling for each provided Unit if it successfully registered with
// Group for at least one of the bootstrap phases or if it was ignored.
func (g *Group) Register(units ...Unit) []bool {
g.log = logger.GetLogger(g.name)
hasRegistered := make([]bool, len(units))
for idx := range units {
if !g.configured {
// if RunConfig has been called we can no longer register Config
// phases of Units
if c, ok := units[idx].(Config); ok {
g.c = append(g.c, c)
hasRegistered[idx] = true
}
}
if p, ok := units[idx].(PreRunner); ok {
g.p = append(g.p, p)
hasRegistered[idx] = true
}
if s, ok := units[idx].(Service); ok {
g.s = append(g.s, s)
hasRegistered[idx] = true
}
}
return hasRegistered
}
// RegisterFlags returns a FlagSet that contains the flags of all registered modules.
func (g *Group) RegisterFlags() *FlagSet {
// run configuration stage
g.f = NewFlagSet(g.name)
g.f.SortFlags = false // keep order of flag registration
g.f.Usage = func() {
fmt.Printf("Flags:\n")
g.f.PrintDefaults()
}
gFS := NewFlagSet("Common Service options")
gFS.SortFlags = false
gFS.StringVarP(&g.name, "name", "n", g.name, `name of this service`)
gFS.BoolVar(&g.showRunGroup, "show-rungroup-units", false, "show rungroup units")
g.f.AddFlagSet(gFS.FlagSet)
// register flags from attached Config objects
fs := make([]*FlagSet, len(g.c))
for idx := range g.c {
// a Namer might have been deregistered
if g.c[idx] == nil {
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("registered", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("register flags")
fs[idx] = g.c[idx].FlagSet()
if fs[idx] == nil {
// no FlagSet returned
g.log.Debug().Str("name", g.c[idx].Name()).Msg("config object did not return a flagset")
continue
}
fs[idx].VisitAll(func(f *pflag.Flag) {
if g.f.Lookup(f.Name) != nil {
// log duplicate flag
g.log.Warn().Str("name", f.Name).Uint32("registered", uint32(idx+1)).Msg("ignoring duplicate flag")
return
}
g.f.AddFlag(f)
})
}
return g.f
}
// RunConfig runs the Config phase of all registered Config aware Units.
// Only use this function if needing to add additional wiring between config
// and (pre)run phases and a separate PreRunner phase is not an option.
// In most cases it is best to use the Run method directly as it will run the
// Config phase prior to executing the PreRunner and Service phases.
// If an error is returned the application must shut down as it is considered
// fatal.
func (g *Group) RunConfig() (interrupted bool, err error) {
g.log = logger.GetLogger(g.name)
g.configured = true
if g.name == "" {
// use the binary name if custom name has not been provided
g.name = path.Base(os.Args[0])
}
defer func() {
if err != nil {
g.log.Error().Err(err).Msg("unexpected exit")
}
}()
// Load config from env and file
if err = config.Load(g.f.Name, g.f.FlagSet); err != nil {
return false, errors.Wrapf(err, "%s fails to load config", g.f.Name)
}
// bail early on help or version requests
switch {
case g.showRunGroup:
fmt.Println(g.ListUnits())
return true, nil
}
// Validate Config inputs
for idx := range g.c {
// a Config might have been deregistered during Run
if g.c[idx] == nil {
g.log.Debug().Uint32("ran", uint32(idx+1)).Msg("skipping validate")
continue
}
g.log.Debug().Str("name", g.c[idx].Name()).Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.c))).Msg("validate config")
if vErr := g.c[idx].Validate(); vErr != nil {
err = multierr.Append(err, vErr)
}
}
// exit on at least one Validate error
if err != nil {
return false, err
}
// log binary name and version
g.log.Info().Msg("started")
return false, nil
}
// Run will execute all phases of all registered Units and block until an error
// occurs.
// If RunConfig has been called prior to Run, the Group's Config phase will be
// skipped and Run continues with the PreRunner and Service phases.
//
// The following phases are executed in the following sequence:
//
// Config phase (serially, in order of Unit registration)
// - FlagSet() Get & register all FlagSets from Config Units.
// - Flag Parsing Using the provided args (os.Args if empty)
// - Validate() Validate Config Units. Exit on first error.
//
// PreRunner phase (serially, in order of Unit registration)
// - PreRun() Execute PreRunner Units. Exit on first error.
//
// Service phase (concurrently)
// - Serve() Execute all Service Units in separate Go routines.
// - Wait Block until one of the Serve() methods returns
// - GracefulStop() Call interrupt handlers of all Service Units.
//
// Run will return with the originating error on:
// - first Config.Validate() returning an error
// - first PreRunner.PreRun() returning an error
// - first Service.Serve() returning (error or nil)
func (g *Group) Run() (err error) {
// run config registration and flag parsing stages
if interrupted, errRun := g.RunConfig(); interrupted || errRun != nil {
return errRun
}
defer func() {
if err != nil {
g.log.Fatal().Err(err).Stack().Msg("unexpected exit")
}
}()
// execute pre run stage and exit on error
for idx := range g.p {
// a PreRunner might have been deregistered during Run
if g.p[idx] == nil {
continue
}
g.log.Debug().Uint32("ran", uint32(idx+1)).Uint32("total", uint32(len(g.p))).Str("name", g.p[idx].Name()).Msg("pre-run")
if err := g.p[idx].PreRun(); err != nil {
return err
}
}
swg := &sync.WaitGroup{}
swg.Add(len(g.s))
go func() {
swg.Wait()
close(g.readyCh)
}()
// feed our registered services to our internal run.Group
for idx := range g.s {
// a Service might have been deregistered during Run
s := g.s[idx]
if s == nil {
continue
}
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("serve")
g.r.Add(func() error {
notify := s.Serve()
swg.Done()
<-notify
return nil
}, func(_ error) {
g.log.Debug().Uint32("total", uint32(len(g.s))).Uint32("ran", uint32(idx+1)).Str("name", s.Name()).Msg("stop")
s.GracefulStop()
})
}
// start registered services and block
return g.r.Run()
}
// ListUnits returns a list of all Group phases and the Units registered to each
// of them.
func (g Group) ListUnits() string {
var (
s string
t = "cli"
)
if len(g.c) > 0 |
if len(g.p) > 0 {
s += "\n- prerun: "
for _, u := range g.p {
if u != nil {
s += u.Name() + " "
}
}
}
if len(g.s) > 0 {
s += "\n- serve : "
for _, u := range g.s {
if u != nil {
t = "svc"
s += u.Name() + " "
}
}
}
return fmt.Sprintf("Group: %s [%s]%s", g.name, t, s)
}
// WaitTillReady blocks the goroutine till all modules are ready.
func (g *Group) WaitTillReady() {
<-g.readyCh
}
| {
s += "\n- config: "
for _, u := range g.c {
if u != nil {
s += u.Name() + " "
}
}
} | conditional_block |
test.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: test.proto
/*
Package grpctest is a generated protocol buffer package.
It is generated from these files:
test.proto
It has these top-level messages:
TestRequest
TestResponse
PrintKVRequest
PrintKVResponse
*/
package grpctest
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type TestRequest struct {
Input int32 `protobuf:"varint,1,opt,name=Input" json:"Input,omitempty"`
}
func (m *TestRequest) Reset() { *m = TestRequest{} }
func (m *TestRequest) String() string { return proto.CompactTextString(m) }
func (*TestRequest) ProtoMessage() {}
func (*TestRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TestRequest) GetInput() int32 {
if m != nil {
return m.Input
}
return 0
}
type TestResponse struct {
Output int32 `protobuf:"varint,2,opt,name=Output" json:"Output,omitempty"`
}
func (m *TestResponse) Reset() { *m = TestResponse{} }
func (m *TestResponse) String() string { return proto.CompactTextString(m) }
func (*TestResponse) ProtoMessage() {}
func (*TestResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *TestResponse) GetOutput() int32 {
if m != nil {
return m.Output
}
return 0
}
type PrintKVRequest struct {
Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"`
// Types that are valid to be assigned to Value:
// *PrintKVRequest_ValueString
// *PrintKVRequest_ValueInt
Value isPrintKVRequest_Value `protobuf_oneof:"Value"`
}
func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} }
func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) }
func (*PrintKVRequest) ProtoMessage() {}
func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isPrintKVRequest_Value interface {
isPrintKVRequest_Value()
}
type PrintKVRequest_ValueString struct {
ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"`
}
type PrintKVRequest_ValueInt struct {
ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"`
}
func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {}
func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {}
func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *PrintKVRequest) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *PrintKVRequest) GetValueString() string |
func (m *PrintKVRequest) GetValueInt() int32 {
if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok {
return x.ValueInt
}
return 0
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{
(*PrintKVRequest_ValueString)(nil),
(*PrintKVRequest_ValueInt)(nil),
}
}
func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.ValueString)
case *PrintKVRequest_ValueInt:
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.ValueInt))
case nil:
default:
return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x)
}
return nil
}
func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*PrintKVRequest)
switch tag {
case 2: // Value.ValueString
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &PrintKVRequest_ValueString{x}
return true, err
case 3: // Value.ValueInt
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &PrintKVRequest_ValueInt{int32(x)}
return true, err
default:
return false, nil
}
}
func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.ValueString)))
n += len(x.ValueString)
case *PrintKVRequest_ValueInt:
n += proto.SizeVarint(3<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.ValueInt))
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type PrintKVResponse struct {
}
func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} }
func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) }
func (*PrintKVResponse) ProtoMessage() {}
func (*PrintKVResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func init() {
proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest")
proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse")
proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest")
proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Test service
type TestClient interface {
Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error)
PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error)
}
type testClient struct {
cc *grpc.ClientConn
}
func NewTestClient(cc *grpc.ClientConn) TestClient {
return &testClient{cc}
}
func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) {
out := new(TestResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) {
out := new(PrintKVResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Test service
type TestServer interface {
Double(context.Context, *TestRequest) (*TestResponse, error)
PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error)
}
func RegisterTestServer(s *grpc.Server, srv TestServer) {
s.RegisterService(&_Test_serviceDesc, srv)
}
func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TestRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TestServer).Double(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/Double",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).Double(ctx, req.(*TestRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PrintKVRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TestServer).PrintKV(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/PrintKV",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Test_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpctest.Test",
HandlerType: (*TestServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Double",
Handler: _Test_Double_Handler,
},
{
MethodName: "PrintKV",
Handler: _Test_PrintKV_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "test.proto",
}
func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 240 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e,
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x48, 0x2f, 0x2a, 0x48, 0x06, 0xf1, 0x95, 0x94,
0xb9, 0xb8, 0x43, 0x52, 0x8b, 0x4b, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x44, 0xb8,
0x58, 0x3d, 0xf3, 0x0a, 0x4a, 0x4b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, 0x20, 0x1c, 0x25,
0x35, 0x2e, 0x1e, 0x88, 0xa2, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x21, 0x31, 0x2e, 0x36, 0xff,
0xd2, 0x12, 0x90, 0x32, 0x26, 0xb0, 0x32, 0x28, 0x4f, 0x29, 0x97, 0x8b, 0x2f, 0xa0, 0x28, 0x33,
0xaf, 0xc4, 0x3b, 0x0c, 0x66, 0x9e, 0x00, 0x17, 0xb3, 0x77, 0x6a, 0x25, 0xd8, 0x34, 0xce, 0x20,
0x10, 0x53, 0x48, 0x89, 0x8b, 0x3b, 0x2c, 0x31, 0xa7, 0x34, 0x35, 0xb8, 0xa4, 0x28, 0x33, 0x2f,
0x1d, 0x6c, 0x00, 0xa7, 0x07, 0x43, 0x10, 0xb2, 0xa0, 0x90, 0x0c, 0x17, 0x07, 0x98, 0xeb, 0x99,
0x57, 0x22, 0xc1, 0x0c, 0xb2, 0xc1, 0x83, 0x21, 0x08, 0x2e, 0xe2, 0xc4, 0xce, 0xc5, 0x0a, 0x66,
0x2b, 0x09, 0x72, 0xf1, 0xc3, 0xad, 0x83, 0xb8, 0xcc, 0xa8, 0x99, 0x91, 0x8b, 0x05, 0xe4, 0x54,
0x21, 0x4b, 0x2e, 0x36, 0x97, 0xfc, 0xd2, 0xa4, 0x9c, 0x54, 0x21, 0x51, 0x3d, 0x98, 0x67, 0xf5,
0x90, 0x7c, 0x2a, 0x25, 0x86, 0x2e, 0x0c, 0x31, 0x41, 0x89, 0x41, 0xc8, 0x81, 0x8b, 0x1d, 0x6a,
0xac, 0x90, 0x04, 0x42, 0x11, 0xaa, 0xc7, 0xa4, 0x24, 0xb1, 0xc8, 0xc0, 0x4c, 0x48, 0x62, 0x03,
0x87, 0xb2, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x34, 0x25, 0xf9, 0xb5, 0x73, 0x01, 0x00, 0x00,
}
| {
if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok {
return x.ValueString
}
return ""
} | identifier_body |
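
The test.pb.go row above holds protoc-generated stubs for the Test service (Double, PrintKV) and its messages. A minimal, hypothetical server implementation against that generated API could look like the sketch below; the grpctest import path and the listen address are assumptions for illustration:

```go
// Hypothetical sketch: implementing and serving the generated TestServer
// interface. The grpctest import path and the port are assumptions.
package main

import (
	"fmt"
	"net"

	context "golang.org/x/net/context"
	grpc "google.golang.org/grpc"

	grpctest "example.com/grpctest" // assumed location of the generated package
)

type testServer struct{}

// Double returns twice the input, matching the TestRequest/TestResponse shapes.
func (testServer) Double(ctx context.Context, req *grpctest.TestRequest) (*grpctest.TestResponse, error) {
	return &grpctest.TestResponse{Output: req.Input * 2}, nil
}

// PrintKV prints the key together with whichever oneof value was set.
func (testServer) PrintKV(ctx context.Context, req *grpctest.PrintKVRequest) (*grpctest.PrintKVResponse, error) {
	switch v := req.Value.(type) {
	case *grpctest.PrintKVRequest_ValueString:
		fmt.Printf("%s = %s\n", req.Key, v.ValueString)
	case *grpctest.PrintKVRequest_ValueInt:
		fmt.Printf("%s = %d\n", req.Key, v.ValueInt)
	}
	return &grpctest.PrintKVResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	grpctest.RegisterTestServer(s, testServer{})
	_ = s.Serve(lis)
}
```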
test.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: test.proto
/*
Package grpctest is a generated protocol buffer package.
It is generated from these files:
test.proto
It has these top-level messages:
TestRequest
TestResponse
PrintKVRequest
PrintKVResponse
*/
package grpctest
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type TestRequest struct {
Input int32 `protobuf:"varint,1,opt,name=Input" json:"Input,omitempty"`
}
func (m *TestRequest) Reset() { *m = TestRequest{} }
func (m *TestRequest) String() string { return proto.CompactTextString(m) }
func (*TestRequest) ProtoMessage() {}
func (*TestRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TestRequest) GetInput() int32 {
if m != nil {
return m.Input
}
return 0
}
type TestResponse struct {
Output int32 `protobuf:"varint,2,opt,name=Output" json:"Output,omitempty"`
}
func (m *TestResponse) Reset() { *m = TestResponse{} }
func (m *TestResponse) String() string { return proto.CompactTextString(m) }
func (*TestResponse) ProtoMessage() {}
func (*TestResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *TestResponse) GetOutput() int32 {
if m != nil {
return m.Output
}
return 0
}
type PrintKVRequest struct {
Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"`
// Types that are valid to be assigned to Value:
// *PrintKVRequest_ValueString
// *PrintKVRequest_ValueInt
Value isPrintKVRequest_Value `protobuf_oneof:"Value"`
}
func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} }
func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) }
func (*PrintKVRequest) ProtoMessage() {}
func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isPrintKVRequest_Value interface {
isPrintKVRequest_Value()
}
type PrintKVRequest_ValueString struct {
ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"`
}
type PrintKVRequest_ValueInt struct {
ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"`
}
func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {}
func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {}
func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *PrintKVRequest) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *PrintKVRequest) GetValueString() string {
if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok {
return x.ValueString
}
return ""
}
func (m *PrintKVRequest) GetValueInt() int32 {
if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok {
return x.ValueInt
}
return 0
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{
(*PrintKVRequest_ValueString)(nil),
(*PrintKVRequest_ValueInt)(nil),
}
}
func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.ValueString)
case *PrintKVRequest_ValueInt:
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.ValueInt))
case nil:
default:
return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x)
}
return nil
}
func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*PrintKVRequest)
switch tag {
case 2: // Value.ValueString
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &PrintKVRequest_ValueString{x}
return true, err
case 3: // Value.ValueInt
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &PrintKVRequest_ValueInt{int32(x)}
return true, err
default:
return false, nil
}
}
func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.ValueString)))
n += len(x.ValueString)
case *PrintKVRequest_ValueInt:
n += proto.SizeVarint(3<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.ValueInt))
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type PrintKVResponse struct {
}
func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} }
func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) }
func (*PrintKVResponse) ProtoMessage() {}
func (*PrintKVResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func init() {
proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest")
proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse")
proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest")
proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Test service
type TestClient interface {
Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error)
PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error)
}
type testClient struct {
cc *grpc.ClientConn
}
func NewTestClient(cc *grpc.ClientConn) TestClient {
return &testClient{cc}
}
func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) {
out := new(TestResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) {
out := new(PrintKVResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Test service
type TestServer interface {
Double(context.Context, *TestRequest) (*TestResponse, error)
PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error)
}
func RegisterTestServer(s *grpc.Server, srv TestServer) {
s.RegisterService(&_Test_serviceDesc, srv)
}
func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TestRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil |
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/Double",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).Double(ctx, req.(*TestRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PrintKVRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TestServer).PrintKV(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/PrintKV",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Test_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpctest.Test",
HandlerType: (*TestServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Double",
Handler: _Test_Double_Handler,
},
{
MethodName: "PrintKV",
Handler: _Test_PrintKV_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "test.proto",
}
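// Editor's note (illustrative only, not emitted by protoc-gen-go): a minimal
// sketch of how the generated bindings above are typically wired up. The
// listen address, dial options and the doubling behaviour are assumptions for
// the example; net and context would need to be imported where this lives.
//
//	type doubler struct{}
//
//	func (doubler) Double(ctx context.Context, in *TestRequest) (*TestResponse, error) {
//		return &TestResponse{Output: in.Input * 2}, nil
//	}
//
//	func (doubler) PrintKV(ctx context.Context, in *PrintKVRequest) (*PrintKVResponse, error) {
//		return &PrintKVResponse{}, nil
//	}
//
//	func example() {
//		lis, _ := net.Listen("tcp", "127.0.0.1:50051") // assumed address
//		s := grpc.NewServer()
//		RegisterTestServer(s, doubler{})
//		go s.Serve(lis)
//		conn, _ := grpc.Dial("127.0.0.1:50051", grpc.WithInsecure())
//		resp, _ := NewTestClient(conn).Double(context.Background(), &TestRequest{Input: 21})
//		fmt.Println(resp.Output) // 42
//	}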
func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 240 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e,
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x48, 0x2f, 0x2a, 0x48, 0x06, 0xf1, 0x95, 0x94,
0xb9, 0xb8, 0x43, 0x52, 0x8b, 0x4b, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x44, 0xb8,
0x58, 0x3d, 0xf3, 0x0a, 0x4a, 0x4b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, 0x20, 0x1c, 0x25,
0x35, 0x2e, 0x1e, 0x88, 0xa2, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x21, 0x31, 0x2e, 0x36, 0xff,
0xd2, 0x12, 0x90, 0x32, 0x26, 0xb0, 0x32, 0x28, 0x4f, 0x29, 0x97, 0x8b, 0x2f, 0xa0, 0x28, 0x33,
0xaf, 0xc4, 0x3b, 0x0c, 0x66, 0x9e, 0x00, 0x17, 0xb3, 0x77, 0x6a, 0x25, 0xd8, 0x34, 0xce, 0x20,
0x10, 0x53, 0x48, 0x89, 0x8b, 0x3b, 0x2c, 0x31, 0xa7, 0x34, 0x35, 0xb8, 0xa4, 0x28, 0x33, 0x2f,
0x1d, 0x6c, 0x00, 0xa7, 0x07, 0x43, 0x10, 0xb2, 0xa0, 0x90, 0x0c, 0x17, 0x07, 0x98, 0xeb, 0x99,
0x57, 0x22, 0xc1, 0x0c, 0xb2, 0xc1, 0x83, 0x21, 0x08, 0x2e, 0xe2, 0xc4, 0xce, 0xc5, 0x0a, 0x66,
0x2b, 0x09, 0x72, 0xf1, 0xc3, 0xad, 0x83, 0xb8, 0xcc, 0xa8, 0x99, 0x91, 0x8b, 0x05, 0xe4, 0x54,
0x21, 0x4b, 0x2e, 0x36, 0x97, 0xfc, 0xd2, 0xa4, 0x9c, 0x54, 0x21, 0x51, 0x3d, 0x98, 0x67, 0xf5,
0x90, 0x7c, 0x2a, 0x25, 0x86, 0x2e, 0x0c, 0x31, 0x41, 0x89, 0x41, 0xc8, 0x81, 0x8b, 0x1d, 0x6a,
0xac, 0x90, 0x04, 0x42, 0x11, 0xaa, 0xc7, 0xa4, 0x24, 0xb1, 0xc8, 0xc0, 0x4c, 0x48, 0x62, 0x03,
0x87, 0xb2, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x34, 0x25, 0xf9, 0xb5, 0x73, 0x01, 0x00, 0x00,
}
| {
return srv.(TestServer).Double(ctx, in)
} | conditional_block |
test.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: test.proto
/*
Package grpctest is a generated protocol buffer package.
It is generated from these files:
test.proto
It has these top-level messages:
TestRequest
TestResponse
PrintKVRequest
PrintKVResponse
*/
package grpctest
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type TestRequest struct {
Input int32 `protobuf:"varint,1,opt,name=Input" json:"Input,omitempty"`
}
func (m *TestRequest) Reset() { *m = TestRequest{} }
func (m *TestRequest) String() string { return proto.CompactTextString(m) }
func (*TestRequest) ProtoMessage() {}
func (*TestRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TestRequest) GetInput() int32 {
if m != nil {
return m.Input
}
return 0
}
type TestResponse struct {
Output int32 `protobuf:"varint,2,opt,name=Output" json:"Output,omitempty"`
}
func (m *TestResponse) Reset() { *m = TestResponse{} }
func (m *TestResponse) String() string { return proto.CompactTextString(m) }
func (*TestResponse) ProtoMessage() {}
func (*TestResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *TestResponse) GetOutput() int32 {
if m != nil {
return m.Output
}
return 0
}
type PrintKVRequest struct {
Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"`
// Types that are valid to be assigned to Value:
// *PrintKVRequest_ValueString
// *PrintKVRequest_ValueInt
Value isPrintKVRequest_Value `protobuf_oneof:"Value"`
}
func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} }
func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) }
func (*PrintKVRequest) ProtoMessage() {}
func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isPrintKVRequest_Value interface {
isPrintKVRequest_Value()
}
type PrintKVRequest_ValueString struct {
ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"`
}
type PrintKVRequest_ValueInt struct {
ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"`
}
func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {}
func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {}
func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *PrintKVRequest) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *PrintKVRequest) GetValueString() string {
if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok {
return x.ValueString
}
return ""
}
func (m *PrintKVRequest) GetValueInt() int32 {
if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok {
return x.ValueInt
}
return 0
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{
(*PrintKVRequest_ValueString)(nil),
(*PrintKVRequest_ValueInt)(nil),
}
}
func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.ValueString)
case *PrintKVRequest_ValueInt:
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.ValueInt))
case nil:
default:
return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x)
}
return nil
}
func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*PrintKVRequest)
switch tag {
case 2: // Value.ValueString
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &PrintKVRequest_ValueString{x}
return true, err
case 3: // Value.ValueInt
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &PrintKVRequest_ValueInt{int32(x)}
return true, err
default:
return false, nil
}
}
func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.ValueString)))
n += len(x.ValueString)
case *PrintKVRequest_ValueInt:
n += proto.SizeVarint(3<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.ValueInt))
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type PrintKVResponse struct {
}
func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} }
func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) }
func (*PrintKVResponse) ProtoMessage() {}
func (*PrintKVResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func init() {
proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest")
proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse")
proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest")
proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Test service
type TestClient interface {
Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error)
PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error)
}
type testClient struct {
cc *grpc.ClientConn
}
func NewTestClient(cc *grpc.ClientConn) TestClient {
return &testClient{cc}
}
func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) {
out := new(TestResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) {
out := new(PrintKVResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil | }
// Server API for Test service
type TestServer interface {
Double(context.Context, *TestRequest) (*TestResponse, error)
PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error)
}
func RegisterTestServer(s *grpc.Server, srv TestServer) {
s.RegisterService(&_Test_serviceDesc, srv)
}
func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TestRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TestServer).Double(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/Double",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).Double(ctx, req.(*TestRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PrintKVRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TestServer).PrintKV(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/PrintKV",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Test_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpctest.Test",
HandlerType: (*TestServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Double",
Handler: _Test_Double_Handler,
},
{
MethodName: "PrintKV",
Handler: _Test_PrintKV_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "test.proto",
}
func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 240 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e,
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x48, 0x2f, 0x2a, 0x48, 0x06, 0xf1, 0x95, 0x94,
0xb9, 0xb8, 0x43, 0x52, 0x8b, 0x4b, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x44, 0xb8,
0x58, 0x3d, 0xf3, 0x0a, 0x4a, 0x4b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, 0x20, 0x1c, 0x25,
0x35, 0x2e, 0x1e, 0x88, 0xa2, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x21, 0x31, 0x2e, 0x36, 0xff,
0xd2, 0x12, 0x90, 0x32, 0x26, 0xb0, 0x32, 0x28, 0x4f, 0x29, 0x97, 0x8b, 0x2f, 0xa0, 0x28, 0x33,
0xaf, 0xc4, 0x3b, 0x0c, 0x66, 0x9e, 0x00, 0x17, 0xb3, 0x77, 0x6a, 0x25, 0xd8, 0x34, 0xce, 0x20,
0x10, 0x53, 0x48, 0x89, 0x8b, 0x3b, 0x2c, 0x31, 0xa7, 0x34, 0x35, 0xb8, 0xa4, 0x28, 0x33, 0x2f,
0x1d, 0x6c, 0x00, 0xa7, 0x07, 0x43, 0x10, 0xb2, 0xa0, 0x90, 0x0c, 0x17, 0x07, 0x98, 0xeb, 0x99,
0x57, 0x22, 0xc1, 0x0c, 0xb2, 0xc1, 0x83, 0x21, 0x08, 0x2e, 0xe2, 0xc4, 0xce, 0xc5, 0x0a, 0x66,
0x2b, 0x09, 0x72, 0xf1, 0xc3, 0xad, 0x83, 0xb8, 0xcc, 0xa8, 0x99, 0x91, 0x8b, 0x05, 0xe4, 0x54,
0x21, 0x4b, 0x2e, 0x36, 0x97, 0xfc, 0xd2, 0xa4, 0x9c, 0x54, 0x21, 0x51, 0x3d, 0x98, 0x67, 0xf5,
0x90, 0x7c, 0x2a, 0x25, 0x86, 0x2e, 0x0c, 0x31, 0x41, 0x89, 0x41, 0xc8, 0x81, 0x8b, 0x1d, 0x6a,
0xac, 0x90, 0x04, 0x42, 0x11, 0xaa, 0xc7, 0xa4, 0x24, 0xb1, 0xc8, 0xc0, 0x4c, 0x48, 0x62, 0x03,
0x87, 0xb2, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x34, 0x25, 0xf9, 0xb5, 0x73, 0x01, 0x00, 0x00,
} | random_line_split |
|
test.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: test.proto
/*
Package grpctest is a generated protocol buffer package.
It is generated from these files:
test.proto
It has these top-level messages:
TestRequest
TestResponse
PrintKVRequest
PrintKVResponse
*/
package grpctest
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type TestRequest struct {
Input int32 `protobuf:"varint,1,opt,name=Input" json:"Input,omitempty"`
}
func (m *TestRequest) Reset() { *m = TestRequest{} }
func (m *TestRequest) String() string { return proto.CompactTextString(m) }
func (*TestRequest) ProtoMessage() {}
func (*TestRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TestRequest) GetInput() int32 {
if m != nil {
return m.Input
}
return 0
}
type TestResponse struct {
Output int32 `protobuf:"varint,2,opt,name=Output" json:"Output,omitempty"`
}
func (m *TestResponse) Reset() { *m = TestResponse{} }
func (m *TestResponse) String() string { return proto.CompactTextString(m) }
func (*TestResponse) ProtoMessage() {}
func (*TestResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *TestResponse) GetOutput() int32 {
if m != nil {
return m.Output
}
return 0
}
type PrintKVRequest struct {
Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"`
// Types that are valid to be assigned to Value:
// *PrintKVRequest_ValueString
// *PrintKVRequest_ValueInt
Value isPrintKVRequest_Value `protobuf_oneof:"Value"`
}
func (m *PrintKVRequest) Reset() { *m = PrintKVRequest{} }
func (m *PrintKVRequest) String() string { return proto.CompactTextString(m) }
func (*PrintKVRequest) ProtoMessage() {}
func (*PrintKVRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isPrintKVRequest_Value interface {
isPrintKVRequest_Value()
}
type PrintKVRequest_ValueString struct {
ValueString string `protobuf:"bytes,2,opt,name=ValueString,oneof"`
}
type PrintKVRequest_ValueInt struct {
ValueInt int32 `protobuf:"varint,3,opt,name=ValueInt,oneof"`
}
func (*PrintKVRequest_ValueString) isPrintKVRequest_Value() {}
func (*PrintKVRequest_ValueInt) isPrintKVRequest_Value() {}
func (m *PrintKVRequest) GetValue() isPrintKVRequest_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *PrintKVRequest) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *PrintKVRequest) GetValueString() string {
if x, ok := m.GetValue().(*PrintKVRequest_ValueString); ok {
return x.ValueString
}
return ""
}
func (m *PrintKVRequest) GetValueInt() int32 {
if x, ok := m.GetValue().(*PrintKVRequest_ValueInt); ok {
return x.ValueInt
}
return 0
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*PrintKVRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _PrintKVRequest_OneofMarshaler, _PrintKVRequest_OneofUnmarshaler, _PrintKVRequest_OneofSizer, []interface{}{
(*PrintKVRequest_ValueString)(nil),
(*PrintKVRequest_ValueInt)(nil),
}
}
func _PrintKVRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.ValueString)
case *PrintKVRequest_ValueInt:
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.ValueInt))
case nil:
default:
return fmt.Errorf("PrintKVRequest.Value has unexpected type %T", x)
}
return nil
}
func _PrintKVRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*PrintKVRequest)
switch tag {
case 2: // Value.ValueString
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &PrintKVRequest_ValueString{x}
return true, err
case 3: // Value.ValueInt
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &PrintKVRequest_ValueInt{int32(x)}
return true, err
default:
return false, nil
}
}
func _PrintKVRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*PrintKVRequest)
// Value
switch x := m.Value.(type) {
case *PrintKVRequest_ValueString:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.ValueString)))
n += len(x.ValueString)
case *PrintKVRequest_ValueInt:
n += proto.SizeVarint(3<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.ValueInt))
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type PrintKVResponse struct {
}
func (m *PrintKVResponse) Reset() { *m = PrintKVResponse{} }
func (m *PrintKVResponse) String() string { return proto.CompactTextString(m) }
func (*PrintKVResponse) ProtoMessage() {}
func (*PrintKVResponse) | () ([]byte, []int) { return fileDescriptor0, []int{3} }
func init() {
proto.RegisterType((*TestRequest)(nil), "grpctest.TestRequest")
proto.RegisterType((*TestResponse)(nil), "grpctest.TestResponse")
proto.RegisterType((*PrintKVRequest)(nil), "grpctest.PrintKVRequest")
proto.RegisterType((*PrintKVResponse)(nil), "grpctest.PrintKVResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Test service
type TestClient interface {
Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error)
PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error)
}
type testClient struct {
cc *grpc.ClientConn
}
func NewTestClient(cc *grpc.ClientConn) TestClient {
return &testClient{cc}
}
func (c *testClient) Double(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) {
out := new(TestResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/Double", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *testClient) PrintKV(ctx context.Context, in *PrintKVRequest, opts ...grpc.CallOption) (*PrintKVResponse, error) {
out := new(PrintKVResponse)
err := grpc.Invoke(ctx, "/grpctest.Test/PrintKV", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Test service
type TestServer interface {
Double(context.Context, *TestRequest) (*TestResponse, error)
PrintKV(context.Context, *PrintKVRequest) (*PrintKVResponse, error)
}
func RegisterTestServer(s *grpc.Server, srv TestServer) {
s.RegisterService(&_Test_serviceDesc, srv)
}
func _Test_Double_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TestRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TestServer).Double(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/Double",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).Double(ctx, req.(*TestRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Test_PrintKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PrintKVRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TestServer).PrintKV(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpctest.Test/PrintKV",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TestServer).PrintKV(ctx, req.(*PrintKVRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Test_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpctest.Test",
HandlerType: (*TestServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Double",
Handler: _Test_Double_Handler,
},
{
MethodName: "PrintKV",
Handler: _Test_PrintKV_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "test.proto",
}
func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 240 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e,
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x48, 0x2f, 0x2a, 0x48, 0x06, 0xf1, 0x95, 0x94,
0xb9, 0xb8, 0x43, 0x52, 0x8b, 0x4b, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x44, 0xb8,
0x58, 0x3d, 0xf3, 0x0a, 0x4a, 0x4b, 0x24, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, 0x20, 0x1c, 0x25,
0x35, 0x2e, 0x1e, 0x88, 0xa2, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x21, 0x31, 0x2e, 0x36, 0xff,
0xd2, 0x12, 0x90, 0x32, 0x26, 0xb0, 0x32, 0x28, 0x4f, 0x29, 0x97, 0x8b, 0x2f, 0xa0, 0x28, 0x33,
0xaf, 0xc4, 0x3b, 0x0c, 0x66, 0x9e, 0x00, 0x17, 0xb3, 0x77, 0x6a, 0x25, 0xd8, 0x34, 0xce, 0x20,
0x10, 0x53, 0x48, 0x89, 0x8b, 0x3b, 0x2c, 0x31, 0xa7, 0x34, 0x35, 0xb8, 0xa4, 0x28, 0x33, 0x2f,
0x1d, 0x6c, 0x00, 0xa7, 0x07, 0x43, 0x10, 0xb2, 0xa0, 0x90, 0x0c, 0x17, 0x07, 0x98, 0xeb, 0x99,
0x57, 0x22, 0xc1, 0x0c, 0xb2, 0xc1, 0x83, 0x21, 0x08, 0x2e, 0xe2, 0xc4, 0xce, 0xc5, 0x0a, 0x66,
0x2b, 0x09, 0x72, 0xf1, 0xc3, 0xad, 0x83, 0xb8, 0xcc, 0xa8, 0x99, 0x91, 0x8b, 0x05, 0xe4, 0x54,
0x21, 0x4b, 0x2e, 0x36, 0x97, 0xfc, 0xd2, 0xa4, 0x9c, 0x54, 0x21, 0x51, 0x3d, 0x98, 0x67, 0xf5,
0x90, 0x7c, 0x2a, 0x25, 0x86, 0x2e, 0x0c, 0x31, 0x41, 0x89, 0x41, 0xc8, 0x81, 0x8b, 0x1d, 0x6a,
0xac, 0x90, 0x04, 0x42, 0x11, 0xaa, 0xc7, 0xa4, 0x24, 0xb1, 0xc8, 0xc0, 0x4c, 0x48, 0x62, 0x03,
0x87, 0xb2, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x34, 0x25, 0xf9, 0xb5, 0x73, 0x01, 0x00, 0x00,
}
| Descriptor | identifier_name |
mesintiket-gen3.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore, QtNetwork
import time
from datetime import datetime
import simplejson
import zlib
import sha
import base64
import config
import configusb
import binascii
import uuid
from decimal import Decimal
import zmq
import redis
import subprocess
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
from printer_driver import PrinterC1
from Route import Route
import os
from gpslistener import GpsListener
from gpiolistener import GPIOListener
from LCD40X4 import GPIO, lcd_init, lcd_goto, lcd_string
import logging
import logging.handlers
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(thread)d %(levelname)-5s %(message)s')
fh = logging.handlers.RotatingFileHandler('log.txt', maxBytes=10000000, backupCount=5)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
PRINTER_PORT = '/dev/ttyAMA0'
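# Editor's note: PRINTER_PORT above appears to be unused in this module; the
# printer is opened further down with config.printer_port instead (see
# PrinterC1(config.printer_port, 9600) in MainApp.__init__ and printTicket).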
class | (QtCore.QThread):
update = QtCore.pyqtSignal(str)
def __init__(self, tosay):
QtCore.QThread.__init__(self)
self.tosay = tosay
def __del__(self):
self.wait()
def run(self):
subprocess.call('espeak -vid+f3 "%s"' % self.tosay, shell=True)
#~ self.terminate()
class MainApp(QtCore.QObject):
def __init__(self):
QtCore.QObject.__init__(self)
self.context = zmq.Context()
self.dblayer = self.context.socket(zmq.REQ)
self.dblayer.connect("tcp://%s:%s" % (config.server_ip, config.server_port))
self.redis = redis.Redis('localhost')
self.route = Route(config.route, config.destinations)
# start new thread to listen to gps signal
self.gpsThread = GpsListener(configusb.gpsusbport, config.gps_baudrate)
self.gpsThread.message.connect(self.gpsReceived)
self.gpsThread.sat_info.connect(self.sat_infoReceived)
self.gpsThread.speed.connect(self.speedReceived)
self.gpsThread.start()
# start new thread to listen to gpio signal
dests = []
for d in config.destinations[0]:
dests.append(d)
self.gpioThread = GPIOListener(dests)
self.gpioThread.destinationPressed.connect(self.destinationChosen)
self.gpioThread.printPressed.connect(self.printTicket)
self.gpioThread.directionSwitched.connect(self.switchDirection)
self.gpioThread.start()
# Timer for sending position every 60 seconds to server
self.sendGpsTimer = QtCore.QTimer(self)
self.sendGpsTimer.timeout.connect(self.sendGpsPosition)
self.sendGpsTimer.start(60000)
# current state (Agent, destination, price)
self.currentAgent = None
self.currentDestination = None
self.currentDistance = None
self.currentLon = None
self.currentLat = None
# print init messages to printer
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
p.printString('Mesin Tiket Bus')
p.printString( config.company_name)
p.printString( 'Bus: %s' % config.bus_plateno)
p.cutPaper(0)
p.close()
# init LCD
#GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
#GPIO.setup(LCD_E, GPIO.OUT) # E
#GPIO.setup(LCD_E2, GPIO.OUT) # E2
#GPIO.setup(LCD_RS, GPIO.OUT) # RS
#GPIO.setup(LCD_D4, GPIO.OUT) # DB4
#GPIO.setup(LCD_D5, GPIO.OUT) # DB5
#GPIO.setup(LCD_D6, GPIO.OUT) # DB6
#GPIO.setup(LCD_D7, GPIO.OUT) # DB7
#GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable
# Initialise display
lcd_init()
lcd_string('Inisiasi sistem selesai..', 1, 1)
self.updateRouteDisplay()
logger.debug('init finished')
self.say('Mesin tiket siap digunakan')
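# Editor's note: configuration fields referenced by this class (from config.py
# and configusb.py, whose contents are not shown here): server_ip, server_port,
# route, destinations, agents, gps_baudrate, printer_port, printer_enabled,
# company_name, bus_plateno, bus_id and configusb.gpsusbport; minimal_price and
# price_per_km only appear in commented-out code. This list is derived from
# usage in this file, not from the config modules themselves.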
def sendGpsPosition(self):
#~ logger.debug(self.gpsThread.lastpos)
try:
if self.gpsThread.lastpos:
# ITPRO861001000786141,11025.595867,-659.625256,31,20121008035615.000,15,0,13,1,
gprmclon = 100 *(int(self.gpsThread.lastpos['lon']) + ((self.gpsThread.lastpos['lon'] - int(self.gpsThread.lastpos['lon'])) / 100 * 60))
gprmclat = 100 *(int(self.gpsThread.lastpos['lat']) + ((self.gpsThread.lastpos['lat'] - int(self.gpsThread.lastpos['lat'])) / 100 * 60))
gpsmsg = 'ITPRO%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\r\n' % (
config.bus_plateno,
gprmclon,
gprmclat,
self.gpsThread.lastpos['alt'],
time.strftime("%Y%m%d%H%M%S", time.gmtime(self.gpsThread.lastpos['gpsdt'])),
self.gpsThread.lastpos['no_sat'],
self.gpsThread.lastpos['speed'],
self.gpsThread.lastpos['bearing'],
'A',
self.gpsThread.lastpos['ext_power'],
'',
)
logger.debug('SENDGPSINFO: %s' % gpsmsg)
self.redis.rpush('mq', gpsmsg)
else:
logger.info('SENDGPSINFO: GPS not set, not sending position to server..')
except Exception:
e = sys.exc_info()
logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1]))
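# Editor's note: the lon/lat conversion in sendGpsPosition turns decimal
# degrees into the NMEA-style ddmm.mmmm form used by the ITPRO message.
# Worked example with an illustrative value: lon = 110.426598 -> integer part
# 110, fractional part 0.426598 -> 0.426598 * 60 = 25.59588 minutes ->
# rendered as 11025.59588. The expression "(lon - int(lon)) / 100 * 60" with
# the outer "* 100" is just an equivalent way of writing degrees*100 + minutes.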
def gpsReceived(self, gpsPos):
#~ print newpos
logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos)))
if gpsPos['type'] == 0:
if gpsPos['lon'] and gpsPos['lat']:
#self.updateTrackPosition(gpsPos)
#lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3)
self.currentLon = gpsPos['lon']
self.currentLat = gpsPos['lat']
curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents)
if self.currentAgent != curAgent:
self.updateCurrentAgent(curAgent)
#self.updateDestinations()
if not curAgent:
self.updateCurrentAgent('Di luar area')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
self.currentAgent = curAgent
else:
self.updateCurrentAgent('Belum mendapat sinyal GPS..')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
def updateCurrentAgent(self, newAgent):
logger.debug('updateCurrentagent: %s' % newAgent)
lcd_string('Agen: %s' % newAgent, 1, 1)
def updateDestinationPriceDistance(self, dest, price, distance):
if dest != '---':
lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2)
self.currentDestination = dest
self.currentPrice = price
self.currentDistance = distance
else:
lcd_string("Tujuan: --- Harga: ---", 1, 2)
self.currentDestination = None
self.currentPrice = None
self.currentDistance = None
def updateStatus(self, status, showTime):
lcd_string('{0}'.format(status), 1, 3)
self.say(status)
QtCore.QTimer.singleShot(showTime, self.resetStatus)
def resetStatus(self):
lcd_string('', 1, 3)
def speedReceived(self, speed):
lcd_goto(('%s kmh' % speed).ljust(7),0,4)
def sat_infoReceived(self, sat_info):
lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4)
def updateNoTicket(self, ticket_no):
lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4)
def updateRouteDisplay(self):
dests = self.route.getDestinationNames()
lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4)
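# Editor's note: the speed, satellite, ticket-number and route helpers above
# all appear to write into row 4 of the 40x4 LCD at fixed column offsets
# (speed at column 0, satellites at 8, ticket number at 15, route at 23),
# each ljust-padded so stale characters from the previous value are overwritten.
# The lcd_goto(text, column, row) argument order is inferred from usage here.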
def getAgentInCurrentLocation(self, gpsPos, agents):
#~ print curPos, agents
for agent in agents:
dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon']))
#~ print dist
if dist <= (agents[agent]['radius'] / 1000.0):
return agent
return None
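# Editor's note: a sketch of the config.agents shape this lookup assumes; the
# real config.py is not shown and these values are hypothetical:
# agents = {
# 'Kutoarjo': {'latlon': (109.91, -7.72), 'radius': 300}, # radius in metres
# }
# latlon is taken in (lon, lat) order, matching how the GPS position is passed,
# and Route.distanceTo() is taken to return kilometres, hence the "/ 1000.0"
# when comparing against the radius.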
def destinationChosen(self, dest_qstring):
dest = str(dest_qstring)
if dest:
if self.currentAgent:
# check if selected destination is valid
if self.route.simpleDistanceTo(config.agents[self.currentAgent]['latlon'], normalized = True) < self.route.simpleDistanceTo(self.route.getDestinations()[dest]['latlon'], normalized = True):
distance = self.route.distanceTo(
config.agents[self.currentAgent]['latlon'],
self.route.getDestinations()[dest]['latlon']
)
price = self.calculatePrice(self.currentAgent, dest, distance)
self.updateDestinationPriceDistance(dest, price, distance)
self.say(dest)
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Tujuan tidak valid', 2000)
else:
self.updateStatus('Di luar agen', 2000)
self.updateDestinationPriceDistance('---', '---', '---')
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Error pemilihan tujuan', 2000)
def say(self, tosay):
subprocess.call('espeak -vid+f3 "%s" 2>/dev/null &' % tosay, shell=True)
#~ speechThread = SpeechThread(tosay)
#~ speechThread.start()
pass
def calculatePrice(self, fromAgent, destination, distance):
print (fromAgent, destination, distance)
for prices in self.route.getDestinations()[destination]['pricelist']:
if fromAgent in prices['from']:
return prices['price']
#~ return max(config.minimal_price, math.ceil((distance*config.price_per_km)/1000.0) * 1000)
return 0
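# Editor's note: calculatePrice looks the fare up from a per-destination price
# list rather than computing it from distance (the distance-based formula is
# commented out above). A sketch of the assumed structure, with hypothetical
# values:
# destinations = {
# 'Jogja': {'latlon': (110.36, -7.80),
# 'pricelist': [{'from': ['Kutoarjo', 'Kebumen'], 'price': 25000}]},
# }
# If no pricelist entry matches the boarding agent, the fare falls back to 0.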
def printTicket(self):
if self.currentAgent in config.agents:
if self.currentDestination: # if any destination selected
dest = self.currentDestination
# print ticket
#~ try:
if config.printer_enabled:
self.say('Mencetak tiket ke %s' % dest)
gpsdt = self.gpsThread.lastpos['gpsdt']
curdt = datetime.fromtimestamp(gpsdt)
# initialize or increment global ticket counter
if not self.redis.get('discountTicketCounter'):
self.redis.set('discountTicketCounter', 0)
self.redis.incr('discountTicketCounter')
# initialize or increment daily ticket counter
if not self.redis.get(curdt.strftime('%Y%m%d:ticket_no')):
self.redis.set(curdt.strftime('%Y%m%d:ticket_no'), 0)
self.redis.incr(curdt.strftime('%Y%m%d:ticket_no'))
isTicketFree = False
if int(self.redis.get('discountTicketCounter')) >= 100:
isTicketFree = True
self.redis.set('discountTicketCounter', 0)
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
if isTicketFree:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('GRATIS PROMO')
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
else:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('Rp {0:,}'.format(self.currentPrice), 2, 4)
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
p.cutPaper(0)
p.close()
#~ print 'PO. Sumber Alam'
#~ print config.bus_plateno
#~ print curdt.strftime('%d-%b-%Y %H:%M', time.localtime(curtime))
#~ print 'Tujuan: {}'.format(destListBox.selectedItem)
#~ print 'Jarak: 0{:.1f} km'.format(distance)
#~ print 'Harga: Rp. {0:.0f}'.format(self.ui.lblPrice.text())
#~ print '%03d%010d' % (config.bus_id, int(curtime))
# initialize or add daily total setoran
if not self.redis.get(curdt.strftime('%Y%m%d:setoran')):
self.redis.set(curdt.strftime('%Y%m%d:setoran'), 0)
self.redis.set(curdt.strftime('%Y%m%d:setoran'), int(self.redis.get(curdt.strftime('%Y%m%d:setoran'))) + self.currentPrice)
self.redis.rpush('mq', '$TIKET%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\r\n' % (
config.bus_plateno,
gpsdt,
self.redis.get(curdt.strftime('%Y%m%d:ticket_no')),
self.redis.get(curdt.strftime('%Y%m%d:setoran')),
self.currentAgent,
dest,
self.currentPrice if not isTicketFree else '0',
'{0:.1f}'.format(self.currentDistance),
self.currentLon,
self.currentLat
))
self.updateNoTicket(self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
try:
self.redis.save()
except Exception:
e = sys.exc_info()
logger.warning('Error redis.save(), maybe redis is saving, it is OK: %s %s' % (e[0], e[1]))
#~ except Exception as ex:
#~ logger.error('cannot print ticket %s' % ex )
#~ self.ui.statusbar.showMessage('Printer error', 2000)
else:
self.updateStatus('Tujuan belum dipilih', 2000)
else:
# show info that cannot print ticket because outside of agent area
self.updateStatus('Di luar agen', 2000)
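# Editor's note on the formats used in printTicket above: the barcode payload
# '%03d%010d' packs the bus id as three digits followed by the GPS timestamp
# as ten digits, and the '$TIKET...' record pushed onto the 'mq' Redis list is
# a comma-separated line of plate number, timestamp, daily ticket number,
# daily takings (setoran), boarding agent, destination, price (0 for promo
# tickets), distance, longitude and latitude.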
def switchDirection(self, direction):
logger.info('Switched direction')
self.updateDestinationPriceDistance('---', '---', '---')
self.route.switchDirection()
self.updateRouteDisplay()
self.say('Ganti arah')
def drawTrackBackground(self):
linePosY = self.scene.height() / 2
# draw horizontal line
self.scene.addLine(self.marginLR, linePosY, self.marginLR + self.linelength, linePosY)
alternateUpDown = True
prevUpRightX = 0
prevDownRightX = 0
wasDownDown = False
wasUpUp = False
# draw the agent name labels along the horizontal route line
for dest in config.agents:
destItem = self.scene.addSimpleText(dest)
distanceDestFactor = self.route.simpleDistanceTo(config.agents[dest]['latlon'], normalized = True)
posLineX = self.marginLR + (distanceDestFactor * self.linelength)
posTextX = posLineX
if (posTextX - int(destItem.boundingRect().width() / 2)) < 0:
# first text
posTextX = 0
elif (posTextX + int(destItem.boundingRect().width() / 2)) > self.scene.width():
posTextX = self.size[0] - destItem.boundingRect().width()
else:
posTextX = posTextX - int(destItem.boundingRect().width() / 2)
posTextY = 0
if alternateUpDown:
# text below line
if wasDownDown or ((posTextX - self.marginLR) >= prevDownRightX):
posTextY = linePosY + 10
wasDownDown = False
else:
posTextY = linePosY + 10 + destItem.boundingRect().height()
wasDownDown = True
else:
# text above line
if wasUpUp or ((posTextX - self.marginLR) >= prevUpRightX):
posTextY= linePosY - (6 + destItem.boundingRect().height())
wasUpUp = False
else:
posTextY= linePosY - (6 + (2 *destItem.boundingRect().height()))
wasUpUp = True
destItem.setPos(posTextX, posTextY)
# draw a short connecting line from the horizontal route line to the label
self.scene.addLine(
posLineX, linePosY,
posLineX, posTextY if alternateUpDown else posTextY + destItem.boundingRect().height() - 4
)
if alternateUpDown:
prevDownRightX = posTextX - self.marginLR + destItem.boundingRect().width()
else:
prevUpRightX = posTextX - self.marginLR + destItem.boundingRect().width()
alternateUpDown = not alternateUpDown
def updateTrackDirection(self):
if self.arrow:
shape = [ (-10, - 8 ), (10, 0), (-10, 8 ), (0, 0) ] if self.route.mode == '>' else [ (10, - 8 ), (-10, 0), (10, 8 ), (0, 0) ]
pol = QtGui.QPolygonF()
for point in shape:
pol.append(QtCore.QPointF(point[0], point[1]))
self.arrow.setPolygon(pol)
else:
self.ui.statusbar.showMessage('Menunggu sinyal GPS...', 2000)
def updateTrackPosition(self, gpsPos):
# draw arrow showing actual bus position
if not self.arrow:
self.arrow = QtGui.QGraphicsPolygonItem()
self.arrow.setBrush(QtCore.Qt.red)
self.arrow.setPen(QtCore.Qt.red)
self.arrow.setVisible(False)
self.scene.addItem(self.arrow)
self.arrow.direction = None
if not self.arrow.direction or (self.arrow.direction != self.route.mode):
self.updateTrackDirection()
self.arrow.direction = self.route.mode
progress = self.route.simpleDistanceTo((gpsPos[QtCore.QString('lon')], gpsPos[QtCore.QString('lat')]), normalized = True)
if self.route.mode == '<':
progress = 1.0 - progress
self.arrow.setPos(self.marginLR+int(self.linelength*progress), self.scene.height() / 2)
self.arrow.setVisible(True)
def main():
app = QtCore.QCoreApplication(sys.argv)
#app.setStyle(QtGui.QStyleFactory.create("plastique"))
ex = MainApp()
#~ ex.show()
#ex.showFullScreen()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| SpeechThread | identifier_name |
mesintiket-gen3.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore, QtNetwork
import time
from datetime import datetime
import simplejson
import zlib
import sha
import base64
import config
import configusb
import binascii
import uuid
from decimal import Decimal
import zmq
import redis
import subprocess
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
from printer_driver import PrinterC1
from Route import Route
import os
from gpslistener import GpsListener
from gpiolistener import GPIOListener
from LCD40X4 import GPIO, lcd_init, lcd_goto, lcd_string
import logging
import logging.handlers
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(thread)d %(levelname)-5s %(message)s')
fh = logging.handlers.RotatingFileHandler('log.txt', maxBytes=10000000, backupCount=5)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
PRINTER_PORT = '/dev/ttyAMA0'
class SpeechThread(QtCore.QThread):
update = QtCore.pyqtSignal(str)
def __init__(self, tosay):
QtCore.QThread.__init__(self)
self.tosay = tosay
def __del__(self):
self.wait()
def run(self):
subprocess.call('espeak -vid+f3 "%s"' % self.tosay, shell=True)
#~ self.terminate()
class MainApp(QtCore.QObject):
def __init__(self):
QtCore.QObject.__init__(self)
self.context = zmq.Context()
self.dblayer = self.context.socket(zmq.REQ)
self.dblayer.connect("tcp://%s:%s" % (config.server_ip, config.server_port))
self.redis = redis.Redis('localhost')
self.route = Route(config.route, config.destinations)
# start new thread to listen to gps signal
self.gpsThread = GpsListener(configusb.gpsusbport, config.gps_baudrate)
self.gpsThread.message.connect(self.gpsReceived)
self.gpsThread.sat_info.connect(self.sat_infoReceived)
self.gpsThread.speed.connect(self.speedReceived)
self.gpsThread.start()
# start new thread to listen to gpio signal
dests = []
for d in config.destinations[0]:
dests.append(d)
self.gpioThread = GPIOListener(dests)
self.gpioThread.destinationPressed.connect(self.destinationChosen)
self.gpioThread.printPressed.connect(self.printTicket)
self.gpioThread.directionSwitched.connect(self.switchDirection)
self.gpioThread.start()
# Timer for sending position every 60 seconds to server
self.sendGpsTimer = QtCore.QTimer(self)
self.sendGpsTimer.timeout.connect(self.sendGpsPosition)
self.sendGpsTimer.start(60000)
# current state (Agent, destination, price)
self.currentAgent = None
self.currentDestination = None
self.currentDistance = None
self.currentLon = None
self.currentLat = None
# print init messages to printer
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
p.printString('Mesin Tiket Bus')
p.printString( config.company_name)
p.printString( 'Bus: %s' % config.bus_plateno)
p.cutPaper(0)
p.close()
# init LCD
#GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
#GPIO.setup(LCD_E, GPIO.OUT) # E
#GPIO.setup(LCD_E2, GPIO.OUT) # E2
#GPIO.setup(LCD_RS, GPIO.OUT) # RS
#GPIO.setup(LCD_D4, GPIO.OUT) # DB4
#GPIO.setup(LCD_D5, GPIO.OUT) # DB5
#GPIO.setup(LCD_D6, GPIO.OUT) # DB6
#GPIO.setup(LCD_D7, GPIO.OUT) # DB7
#GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable
# Initialise display
lcd_init()
lcd_string('Inisiasi sistem selesai..', 1, 1)
self.updateRouteDisplay()
logger.debug('init finished')
self.say('Mesin tiket siap digunakan')
def sendGpsPosition(self):
#~ logger.debug(self.gpsThread.lastpos)
try:
if self.gpsThread.lastpos:
# ITPRO861001000786141,11025.595867,-659.625256,31,20121008035615.000,15,0,13,1,
gprmclon = 100 *(int(self.gpsThread.lastpos['lon']) + ((self.gpsThread.lastpos['lon'] - int(self.gpsThread.lastpos['lon'])) / 100 * 60))
gprmclat = 100 *(int(self.gpsThread.lastpos['lat']) + ((self.gpsThread.lastpos['lat'] - int(self.gpsThread.lastpos['lat'])) / 100 * 60))
gpsmsg = 'ITPRO%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\r\n' % (
config.bus_plateno,
gprmclon,
gprmclat,
self.gpsThread.lastpos['alt'],
time.strftime("%Y%m%d%H%M%S", time.gmtime(self.gpsThread.lastpos['gpsdt'])),
self.gpsThread.lastpos['no_sat'],
self.gpsThread.lastpos['speed'],
self.gpsThread.lastpos['bearing'],
'A',
self.gpsThread.lastpos['ext_power'],
'',
)
logger.debug('SENDGPSINFO: %s' % gpsmsg)
self.redis.rpush('mq', gpsmsg)
else:
logger.info('SENDGPSINFO: GPS not set, not sending position to server..')
except Exception:
e = sys.exc_info()
logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1]))
def gpsReceived(self, gpsPos):
#~ print newpos
logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos)))
if gpsPos['type'] == 0:
if gpsPos['lon'] and gpsPos['lat']:
#self.updateTrackPosition(gpsPos)
#lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3)
self.currentLon = gpsPos['lon']
self.currentLat = gpsPos['lat']
curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents)
if self.currentAgent != curAgent:
self.updateCurrentAgent(curAgent)
#self.updateDestinations()
if not curAgent:
self.updateCurrentAgent('Di luar area')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
self.currentAgent = curAgent
else:
self.updateCurrentAgent('Belum mendapat sinyal GPS..')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
def updateCurrentAgent(self, newAgent):
logger.debug('updateCurrentagent: %s' % newAgent)
lcd_string('Agen: %s' % newAgent, 1, 1)
def updateDestinationPriceDistance(self, dest, price, distance):
if dest != '---':
lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2)
self.currentDestination = dest
self.currentPrice = price
self.currentDistance = distance
else:
lcd_string("Tujuan: --- Harga: ---", 1, 2)
self.currentDestination = None
self.currentPrice = None
self.currentDistance = None
def updateStatus(self, status, showTime):
lcd_string('{0}'.format(status), 1, 3)
self.say(status)
QtCore.QTimer.singleShot(showTime, self.resetStatus)
def resetStatus(self):
lcd_string('', 1, 3)
def speedReceived(self, speed):
lcd_goto(('%s kmh' % speed).ljust(7),0,4)
def sat_infoReceived(self, sat_info):
lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4)
def updateNoTicket(self, ticket_no):
lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4)
def updateRouteDisplay(self):
dests = self.route.getDestinationNames()
lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4)
def getAgentInCurrentLocation(self, gpsPos, agents):
#~ print curPos, agents
for agent in agents:
dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon']))
#~ print dist
if dist <= (agents[agent]['radius'] / 1000.0):
return agent
return None
def destinationChosen(self, dest_qstring):
dest = str(dest_qstring)
if dest:
if self.currentAgent:
# check if selected destination is valid
if self.route.simpleDistanceTo(config.agents[self.currentAgent]['latlon'], normalized = True) < self.route.simpleDistanceTo(self.route.getDestinations()[dest]['latlon'], normalized = True):
distance = self.route.distanceTo(
config.agents[self.currentAgent]['latlon'],
self.route.getDestinations()[dest]['latlon']
)
price = self.calculatePrice(self.currentAgent, dest, distance)
self.updateDestinationPriceDistance(dest, price, distance)
self.say(dest)
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Tujuan tidak valid', 2000)
else:
self.updateStatus('Di luar agen', 2000)
self.updateDestinationPriceDistance('---', '---', '---')
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Error pemilihan tujuan', 2000)
def say(self, tosay):
subprocess.call('espeak -vid+f3 "%s" 2>/dev/null &' % tosay, shell=True)
#~ speechThread = SpeechThread(tosay)
#~ speechThread.start()
pass
def calculatePrice(self, fromAgent, destination, distance):
print (fromAgent, destination, distance)
for prices in self.route.getDestinations()[destination]['pricelist']:
if fromAgent in prices['from']:
return prices['price']
#~ return max(config.minimal_price, math.ceil((distance*config.price_per_km)/1000.0) * 1000)
return 0
def printTicket(self):
if self.currentAgent in config.agents:
if self.currentDestination: # if any destination selected
dest = self.currentDestination
# print ticket
#~ try:
if config.printer_enabled:
self.say('Mencetak tiket ke %s' % dest)
gpsdt = self.gpsThread.lastpos['gpsdt']
curdt = datetime.fromtimestamp(gpsdt)
# initialize or increment global ticket counter
if not self.redis.get('discountTicketCounter'):
self.redis.set('discountTicketCounter', 0)
self.redis.incr('discountTicketCounter')
# initialize or increment daily ticket counter
if not self.redis.get(curdt.strftime('%Y%m%d:ticket_no')):
self.redis.set(curdt.strftime('%Y%m%d:ticket_no'), 0)
self.redis.incr(curdt.strftime('%Y%m%d:ticket_no'))
isTicketFree = False
if int(self.redis.get('discountTicketCounter')) >= 100:
isTicketFree = True
self.redis.set('discountTicketCounter', 0)
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
if isTicketFree:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('GRATIS PROMO')
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
else:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('Rp {0:,}'.format(self.currentPrice), 2, 4)
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
p.cutPaper(0)
p.close()
#~ print 'PO. Sumber Alam'
#~ print config.bus_plateno
#~ print curdt.strftime('%d-%b-%Y %H:%M', time.localtime(curtime))
#~ print 'Tujuan: {}'.format(destListBox.selectedItem)
#~ print 'Jarak: 0{:.1f} km'.format(distance)
#~ print 'Harga: Rp. {0:.0f}'.format(self.ui.lblPrice.text())
#~ print '%03d%010d' % (config.bus_id, int(curtime))
# initialize or add daily total setoran
if not self.redis.get(curdt.strftime('%Y%m%d:setoran')):
|
self.redis.set(curdt.strftime('%Y%m%d:setoran'), int(self.redis.get(curdt.strftime('%Y%m%d:setoran'))) + self.currentPrice)
self.redis.rpush('mq', '$TIKET%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\r\n' % (
config.bus_plateno,
gpsdt,
self.redis.get(curdt.strftime('%Y%m%d:ticket_no')),
self.redis.get(curdt.strftime('%Y%m%d:setoran')),
self.currentAgent,
dest,
self.currentPrice if not isTicketFree else '0',
'{0:.1f}'.format(self.currentDistance),
self.currentLon,
self.currentLat
))
self.updateNoTicket(self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
try:
self.redis.save()
except Exception:
e = sys.exc_info()
logger.warning('Error redis.save(), maybe redis is saving, it is OK: %s %s' % (e[0], e[1]))
#~ except Exception as ex:
#~ logger.error('cannot print ticket %s' % ex )
#~ self.ui.statusbar.showMessage('Printer error', 2000)
else:
self.updateStatus('Tujuan belum dipilih', 2000)
else:
# show info that cannot print ticket because outside of agent area
self.updateStatus('Di luar agen', 2000)
def switchDirection(self, direction):
logger.info('Switched direction')
self.updateDestinationPriceDistance('---', '---', '---')
self.route.switchDirection()
self.updateRouteDisplay()
self.say('Ganti arah')
def drawTrackBackground(self):
linePosY = self.scene.height() / 2
# draw horizontal line
self.scene.addLine(self.marginLR, linePosY, self.marginLR + self.linelength, linePosY)
alternateUpDown = True
prevUpRightX = 0
prevDownRightX = 0
wasDownDown = False
wasUpUp = False
# draw the agent name labels along the horizontal route line
for dest in config.agents:
destItem = self.scene.addSimpleText(dest)
distanceDestFactor = self.route.simpleDistanceTo(config.agents[dest]['latlon'], normalized = True)
posLineX = self.marginLR + (distanceDestFactor * self.linelength)
posTextX = posLineX
if (posTextX - int(destItem.boundingRect().width() / 2)) < 0:
# first text
posTextX = 0
elif (posTextX + int(destItem.boundingRect().width() / 2)) > self.scene.width():
posTextX = self.size[0] - destItem.boundingRect().width()
else:
posTextX = posTextX - int(destItem.boundingRect().width() / 2)
posTextY = 0
if alternateUpDown:
# text below line
if wasDownDown or ((posTextX - self.marginLR) >= prevDownRightX):
posTextY = linePosY + 10
wasDownDown = False
else:
posTextY = linePosY + 10 + destItem.boundingRect().height()
wasDownDown = True
else:
# text above line
if wasUpUp or ((posTextX - self.marginLR) >= prevUpRightX):
posTextY= linePosY - (6 + destItem.boundingRect().height())
wasUpUp = False
else:
posTextY= linePosY - (6 + (2 *destItem.boundingRect().height()))
wasUpUp = True
destItem.setPos(posTextX, posTextY)
# draw connecting line from the horizontal line to the label
self.scene.addLine(
posLineX, linePosY,
posLineX, posTextY if alternateUpDown else posTextY + destItem.boundingRect().height() - 4
)
if alternateUpDown:
prevDownRightX = posTextX - self.marginLR + destItem.boundingRect().width()
else:
prevUpRightX = posTextX - self.marginLR + destItem.boundingRect().width()
alternateUpDown = not alternateUpDown
def updateTrackDirection(self):
if self.arrow:
shape = [ (-10, - 8 ), (10, 0), (-10, 8 ), (0, 0) ] if self.route.mode == '>' else [ (10, - 8 ), (-10, 0), (10, 8 ), (0, 0) ]
pol = QtGui.QPolygonF()
for point in shape:
pol.append(QtCore.QPointF(point[0], point[1]))
self.arrow.setPolygon(pol)
else:
self.ui.statusbar.showMessage('Menunggu sinyal GPS...', 2000)
def updateTrackPosition(self, gpsPos):
# draw arrow showing actual bus position
if not self.arrow:
self.arrow = QtGui.QGraphicsPolygonItem()
self.arrow.setBrush(QtCore.Qt.red)
self.arrow.setPen(QtCore.Qt.red)
self.arrow.setVisible(False)
self.scene.addItem(self.arrow)
self.arrow.direction = None
if not self.arrow.direction or (self.arrow.direction != self.route.mode):
self.updateTrackDirection()
self.arrow.direction = self.route.mode
progress = self.route.simpleDistanceTo((gpsPos[QtCore.QString('lon')], gpsPos[QtCore.QString('lat')]), normalized = True)
if self.route.mode == '<':
progress = 1.0 - progress
self.arrow.setPos(self.marginLR+int(self.linelength*progress), self.scene.height() / 2)
self.arrow.setVisible(True)
def main():
app = QtCore.QCoreApplication(sys.argv)
#app.setStyle(QtGui.QStyleFactory.create("plastique"))
ex = MainApp()
#~ ex.show()
#ex.showFullScreen()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| self.redis.set(curdt.strftime('%Y%m%d:setoran'), 0) | conditional_block |
mesintiket-gen3.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore, QtNetwork
import time
from datetime import datetime
import simplejson
import zlib
import sha
import base64
import config
import configusb
import binascii
import uuid
from decimal import Decimal
import zmq
import redis
import subprocess
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
from printer_driver import PrinterC1
from Route import Route
import os
import config |
import logging
import logging.handlers
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(thread)d %(levelname)-5s %(message)s')
fh = logging.handlers.RotatingFileHandler('log.txt', maxBytes=10000000, backupCount=5)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
PRINTER_PORT = '/dev/ttyAMA0'
class SpeechThread(QtCore.QThread):
update = QtCore.pyqtSignal(str)
def __init__(self, tosay):
QtCore.QThread.__init__(self)
self.tosay = tosay
def __del__(self):
self.wait()
def run(self):
subprocess.call('espeak -vid+f3 "%s"' % self.tosay, shell=True)
#~ self.terminate()
class MainApp(QtCore.QObject):
def __init__(self):
QtCore.QObject.__init__(self)
self.context = zmq.Context()
self.dblayer = self.context.socket(zmq.REQ)
self.dblayer.connect("tcp://%s:%s" % (config.server_ip, config.server_port))
self.redis = redis.Redis('localhost')
self.route = Route(config.route, config.destinations)
# start new thread to listen to gps signal
self.gpsThread = GpsListener(configusb.gpsusbport, config.gps_baudrate)
self.gpsThread.message.connect(self.gpsReceived)
self.gpsThread.sat_info.connect(self.sat_infoReceived)
self.gpsThread.speed.connect(self.speedReceived)
self.gpsThread.start()
# start new thread to listen to gpio signal
dests = []
for d in config.destinations[0]:
dests.append(d)
self.gpioThread = GPIOListener(dests)
self.gpioThread.destinationPressed.connect(self.destinationChosen)
self.gpioThread.printPressed.connect(self.printTicket)
self.gpioThread.directionSwitched.connect(self.switchDirection)
self.gpioThread.start()
# Timer for sending position every 60 seconds to server
self.sendGpsTimer = QtCore.QTimer(self)
self.sendGpsTimer.timeout.connect(self.sendGpsPosition)
self.sendGpsTimer.start(60000)
# current state (Agent, destination, price)
self.currentAgent = None
self.currentDestination = None
self.currentDistance = None
self.currentLon = None
self.currentLat = None
# print init messages to printer
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
p.printString('Mesin Tiket Bus')
p.printString( config.company_name)
p.printString( 'Bus: %s' % config.bus_plateno)
p.cutPaper(0)
p.close()
# init LCD
#GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
#GPIO.setup(LCD_E, GPIO.OUT) # E
#GPIO.setup(LCD_E2, GPIO.OUT) # E2
#GPIO.setup(LCD_RS, GPIO.OUT) # RS
#GPIO.setup(LCD_D4, GPIO.OUT) # DB4
#GPIO.setup(LCD_D5, GPIO.OUT) # DB5
#GPIO.setup(LCD_D6, GPIO.OUT) # DB6
#GPIO.setup(LCD_D7, GPIO.OUT) # DB7
#GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable
# Initialise display
lcd_init()
lcd_string('Inisiasi sistem selesai..', 1, 1)
self.updateRouteDisplay()
logger.debug('init finished')
self.say('Mesin tiket siap digunakan')
def sendGpsPosition(self):
#~ logger.debug(self.gpsThread.lastpos)
try:
if self.gpsThread.lastpos:
# ITPRO861001000786141,11025.595867,-659.625256,31,20121008035615.000,15,0,13,1,
gprmclon = 100 *(int(self.gpsThread.lastpos['lon']) + ((self.gpsThread.lastpos['lon'] - int(self.gpsThread.lastpos['lon'])) / 100 * 60))
gprmclat = 100 *(int(self.gpsThread.lastpos['lat']) + ((self.gpsThread.lastpos['lat'] - int(self.gpsThread.lastpos['lat'])) / 100 * 60))
gpsmsg = 'ITPRO%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\r\n' % (
config.bus_plateno,
gprmclon,
gprmclat,
self.gpsThread.lastpos['alt'],
time.strftime("%Y%m%d%H%M%S", time.gmtime(self.gpsThread.lastpos['gpsdt'])),
self.gpsThread.lastpos['no_sat'],
self.gpsThread.lastpos['speed'],
self.gpsThread.lastpos['bearing'],
'A',
self.gpsThread.lastpos['ext_power'],
'',
)
logger.debug('SENDGPSINFO: %s' % gpsmsg)
self.redis.rpush('mq', gpsmsg)
else:
logger.info('SENDGPSINFO: GPS not set, not sending position to server..')
except Exception:
e = sys.exc_info()
logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1]))
def gpsReceived(self, gpsPos):
#~ print newpos
logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos)))
if gpsPos['type'] == 0:
if gpsPos['lon'] and gpsPos['lat']:
#self.updateTrackPosition(gpsPos)
#lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3)
self.currentLon = gpsPos['lon']
self.currentLat = gpsPos['lat']
curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents)
if self.currentAgent != curAgent:
self.updateCurrentAgent(curAgent)
#self.updateDestinations()
if not curAgent:
self.updateCurrentAgent('Di luar area')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
self.currentAgent = curAgent
else:
self.updateCurrentAgent('Belum mendapat sinyal GPS..')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
def updateCurrentAgent(self, newAgent):
logger.debug('updateCurrentagent: %s' % newAgent)
lcd_string('Agen: %s' % newAgent, 1, 1)
def updateDestinationPriceDistance(self, dest, price, distance):
if dest != '---':
lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2)
self.currentDestination = dest
self.currentPrice = price
self.currentDistance = distance
else:
lcd_string("Tujuan: --- Harga: ---", 1, 2)
self.currentDestination = None
self.currentPrice = None
self.currentDistance = None
def updateStatus(self, status, showTime):
lcd_string('{0}'.format(status), 1, 3)
self.say(status)
QtCore.QTimer.singleShot(showTime, self.resetStatus)
def resetStatus(self):
lcd_string('', 1, 3)
def speedReceived(self, speed):
lcd_goto(('%s kmh' % speed).ljust(7),0,4)
def sat_infoReceived(self, sat_info):
lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4)
def updateNoTicket(self, ticket_no):
lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4)
def updateRouteDisplay(self):
dests = self.route.getDestinationNames()
lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4)
def getAgentInCurrentLocation(self, gpsPos, agents):
#~ print curPos, agents
for agent in agents:
dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon']))
#~ print dist
if dist <= (agents[agent]['radius'] / 1000.0):
return agent
return None
def destinationChosen(self, dest_qstring):
dest = str(dest_qstring)
if dest:
if self.currentAgent:
# check if selected destination is valid
if self.route.simpleDistanceTo(config.agents[self.currentAgent]['latlon'], normalized = True) < self.route.simpleDistanceTo(self.route.getDestinations()[dest]['latlon'], normalized = True):
distance = self.route.distanceTo(
config.agents[self.currentAgent]['latlon'],
self.route.getDestinations()[dest]['latlon']
)
price = self.calculatePrice(self.currentAgent, dest, distance)
self.updateDestinationPriceDistance(dest, price, distance)
self.say(dest)
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Tujuan tidak valid', 2000)
else:
self.updateStatus('Di luar agen', 2000)
self.updateDestinationPriceDistance('---', '---', '---')
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Error pemilihan tujuan', 2000)
def say(self, tosay):
subprocess.call('espeak -vid+f3 "%s" 2>/dev/null &' % tosay, shell=True)
#~ speechThread = SpeechThread(tosay)
#~ speechThread.start()
pass
def calculatePrice(self, fromAgent, destination, distance):
print (fromAgent, destination, distance)
for prices in self.route.getDestinations()[destination]['pricelist']:
if fromAgent in prices['from']:
return prices['price']
#~ return max(config.minimal_price, math.ceil((distance*config.price_per_km)/1000.0) * 1000)
return 0
def printTicket(self):
if self.currentAgent in config.agents:
if self.currentDestination: # if any destination selected
dest = self.currentDestination
# print ticket
#~ try:
if config.printer_enabled:
self.say('Mencetak tiket ke %s' % dest)
gpsdt = self.gpsThread.lastpos['gpsdt']
curdt = datetime.fromtimestamp(gpsdt)
# initialize or increment global ticket counter
if not self.redis.get('discountTicketCounter'):
self.redis.set('discountTicketCounter', 0)
self.redis.incr('discountTicketCounter')
# initialize or increment daily ticket counter
if not self.redis.get(curdt.strftime('%Y%m%d:ticket_no')):
self.redis.set(curdt.strftime('%Y%m%d:ticket_no'), 0)
self.redis.incr(curdt.strftime('%Y%m%d:ticket_no'))
isTicketFree = False
if int(self.redis.get('discountTicketCounter')) >= 100:
isTicketFree = True
self.redis.set('discountTicketCounter', 0)
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
if isTicketFree:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('GRATIS PROMO')
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
else:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('Rp {0:,}'.format(self.currentPrice), 2, 4)
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
p.cutPaper(0)
p.close()
#~ print 'PO. Sumber Alam'
#~ print config.bus_plateno
#~ print curdt.strftime('%d-%b-%Y %H:%M', time.localtime(curtime))
#~ print 'Tujuan: {}'.format(destListBox.selectedItem)
#~ print 'Jarak: 0{:.1f} km'.format(distance)
#~ print 'Harga: Rp. {0:.0f}'.format(self.ui.lblPrice.text())
#~ print '%03d%010d' % (config.bus_id, int(curtime))
# initialize or add daily total setoran
if not self.redis.get(curdt.strftime('%Y%m%d:setoran')):
self.redis.set(curdt.strftime('%Y%m%d:setoran'), 0)
self.redis.set(curdt.strftime('%Y%m%d:setoran'), int(self.redis.get(curdt.strftime('%Y%m%d:setoran'))) + self.currentPrice)
self.redis.rpush('mq', '$TIKET%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\r\n' % (
config.bus_plateno,
gpsdt,
self.redis.get(curdt.strftime('%Y%m%d:ticket_no')),
self.redis.get(curdt.strftime('%Y%m%d:setoran')),
self.currentAgent,
dest,
self.currentPrice if not isTicketFree else '0',
'{0:.1f}'.format(self.currentDistance),
self.currentLon,
self.currentLat
))
self.updateNoTicket(self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
try:
self.redis.save()
except Exception:
e = sys.exc_info()
logger.warning('Error redis.save(), maybe redis is saving, it is OK: %s %s' % (e[0], e[1]))
#~ except Exception as ex:
#~ logger.error('cannot print ticket %s' % ex )
#~ self.ui.statusbar.showMessage('Printer error', 2000)
else:
self.updateStatus('Tujuan belum dipilih', 2000)
else:
# show info that the ticket cannot be printed because we are outside of any agent area
self.updateStatus('Di luar agen', 2000)
def switchDirection(self, direction):
logger.info('Switched direction')
self.updateDestinationPriceDistance('---', '---', '---')
self.route.switchDirection()
self.updateRouteDisplay()
self.say('Ganti arah')
def drawTrackBackground(self):
linePosY = self.scene.height() / 2
# draw horizontal line
self.scene.addLine(self.marginLR, linePosY, self.marginLR + self.linelength, linePosY)
alternateUpDown = True
prevUpRightX = 0
prevDownRightX = 0
wasDownDown = False
wasUpUp = False
# draw agent name labels
for dest in config.agents:
destItem = self.scene.addSimpleText(dest)
distanceDestFactor = self.route.simpleDistanceTo(config.agents[dest]['latlon'], normalized = True)
posLineX = self.marginLR + (distanceDestFactor * self.linelength)
posTextX = posLineX
if (posTextX - int(destItem.boundingRect().width() / 2)) < 0:
# first text
posTextX = 0
elif (posTextX + int(destItem.boundingRect().width() / 2)) > self.scene.width():
posTextX = self.size[0] - destItem.boundingRect().width()
else:
posTextX = posTextX - int(destItem.boundingRect().width() / 2)
posTextY = 0
if alternateUpDown:
# text below line
if wasDownDown or ((posTextX - self.marginLR) >= prevDownRightX):
posTextY = linePosY + 10
wasDownDown = False
else:
posTextY = linePosY + 10 + destItem.boundingRect().height()
wasDownDown = True
else:
# text above line
if wasUpUp or ((posTextX - self.marginLR) >= prevUpRightX):
posTextY= linePosY - (6 + destItem.boundingRect().height())
wasUpUp = False
else:
posTextY= linePosY - (6 + (2 *destItem.boundingRect().height()))
wasUpUp = True
destItem.setPos(posTextX, posTextY)
# draw connecting line from the horizontal line to the label
self.scene.addLine(
posLineX, linePosY,
posLineX, posTextY if alternateUpDown else posTextY + destItem.boundingRect().height() - 4
)
if alternateUpDown:
prevDownRightX = posTextX - self.marginLR + destItem.boundingRect().width()
else:
prevUpRightX = posTextX - self.marginLR + destItem.boundingRect().width()
alternateUpDown = not alternateUpDown
def updateTrackDirection(self):
if self.arrow:
shape = [ (-10, - 8 ), (10, 0), (-10, 8 ), (0, 0) ] if self.route.mode == '>' else [ (10, - 8 ), (-10, 0), (10, 8 ), (0, 0) ]
pol = QtGui.QPolygonF()
for point in shape:
pol.append(QtCore.QPointF(point[0], point[1]))
self.arrow.setPolygon(pol)
else:
self.ui.statusbar.showMessage('Menunggu sinyal GPS...', 2000)
def updateTrackPosition(self, gpsPos):
# draw arrow showing actual bus position
if not self.arrow:
self.arrow = QtGui.QGraphicsPolygonItem()
self.arrow.setBrush(QtCore.Qt.red)
self.arrow.setPen(QtCore.Qt.red)
self.arrow.setVisible(False)
self.scene.addItem(self.arrow)
self.arrow.direction = None
if not self.arrow.direction or (self.arrow.direction != self.route.mode):
self.updateTrackDirection()
self.arrow.direction = self.route.mode
progress = self.route.simpleDistanceTo((gpsPos[QtCore.QString('lon')], gpsPos[QtCore.QString('lat')]), normalized = True)
if self.route.mode == '<':
progress = 1.0 - progress
self.arrow.setPos(self.marginLR+int(self.linelength*progress), self.scene.height() / 2)
self.arrow.setVisible(True)
def main():
app = QtCore.QCoreApplication(sys.argv)
#app.setStyle(QtGui.QStyleFactory.create("plastique"))
ex = MainApp()
#~ ex.show()
#ex.showFullScreen()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | from gpslistener import GpsListener
from gpiolistener import GPIOListener
from LCD40X4 import GPIO, lcd_init, lcd_goto, lcd_string, GPIO | random_line_split |
mesintiket-gen3.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore, QtNetwork
import time
from datetime import datetime
import simplejson
import zlib
import sha
import base64
import config
import configusb
import binascii
import uuid
from decimal import Decimal
import zmq
import redis
import subprocess
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
from printer_driver import PrinterC1
from Route import Route
import os
import config
from gpslistener import GpsListener
from gpiolistener import GPIOListener
from LCD40X4 import GPIO, lcd_init, lcd_goto, lcd_string, GPIO
import logging
import logging.handlers
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(thread)d %(levelname)-5s %(message)s')
fh = logging.handlers.RotatingFileHandler('log.txt', maxBytes=10000000, backupCount=5)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
PRINTER_PORT = '/dev/ttyAMA0'
class SpeechThread(QtCore.QThread):
update = QtCore.pyqtSignal(str)
def __init__(self, tosay):
QtCore.QThread.__init__(self)
self.tosay = tosay
def __del__(self):
self.wait()
def run(self):
subprocess.call('espeak -vid+f3 "%s"' % self.tosay, shell=True)
#~ self.terminate()
class MainApp(QtCore.QObject):
def __init__(self):
QtCore.QObject.__init__(self)
self.context = zmq.Context()
self.dblayer = self.context.socket(zmq.REQ)
self.dblayer.connect("tcp://%s:%s" % (config.server_ip, config.server_port))
self.redis = redis.Redis('localhost')
self.route = Route(config.route, config.destinations)
# start new thread to listen to gps signal
self.gpsThread = GpsListener(configusb.gpsusbport, config.gps_baudrate)
self.gpsThread.message.connect(self.gpsReceived)
self.gpsThread.sat_info.connect(self.sat_infoReceived)
self.gpsThread.speed.connect(self.speedReceived)
self.gpsThread.start()
# start new thread to listen to gpio signal
dests = []
for d in config.destinations[0]:
dests.append(d)
self.gpioThread = GPIOListener(dests)
self.gpioThread.destinationPressed.connect(self.destinationChosen)
self.gpioThread.printPressed.connect(self.printTicket)
self.gpioThread.directionSwitched.connect(self.switchDirection)
self.gpioThread.start()
# Timer for sending position every 60 seconds to server
self.sendGpsTimer = QtCore.QTimer(self)
self.sendGpsTimer.timeout.connect(self.sendGpsPosition)
self.sendGpsTimer.start(60000)
# current state (Agent, destination, price)
self.currentAgent = None
self.currentDestination = None
self.currentDistance = None
self.currentLon = None
self.currentLat = None
# print init messages to printer
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
p.printString('Mesin Tiket Bus')
p.printString( config.company_name)
p.printString( 'Bus: %s' % config.bus_plateno)
p.cutPaper(0)
p.close()
# init LCD
#GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
#GPIO.setup(LCD_E, GPIO.OUT) # E
#GPIO.setup(LCD_E2, GPIO.OUT) # E2
#GPIO.setup(LCD_RS, GPIO.OUT) # RS
#GPIO.setup(LCD_D4, GPIO.OUT) # DB4
#GPIO.setup(LCD_D5, GPIO.OUT) # DB5
#GPIO.setup(LCD_D6, GPIO.OUT) # DB6
#GPIO.setup(LCD_D7, GPIO.OUT) # DB7
#GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable
# Initialise display
lcd_init()
lcd_string('Inisiasi sistem selesai..', 1, 1)
self.updateRouteDisplay()
logger.debug('init finished')
self.say('Mesin tiket siap digunakan')
def sendGpsPosition(self):
#~ logger.debug(self.gpsThread.lastpos)
|
def gpsReceived(self, gpsPos):
#~ print newpos
logger.debug('type of gpsPos: %s %s' % (type(gpsPos), repr(gpsPos)))
if gpsPos['type'] == 0:
if gpsPos['lon'] and gpsPos['lat']:
#self.updateTrackPosition(gpsPos)
#lcd_goto( 'Lon: {0:.6f} Lat: {1:.6f}'.format(gpsPos['lon'], gpsPos['lat']), 0, 3)
self.currentLon = gpsPos['lon']
self.currentLat = gpsPos['lat']
curAgent = self.getAgentInCurrentLocation(gpsPos, config.agents)
if self.currentAgent != curAgent:
self.updateCurrentAgent(curAgent)
#self.updateDestinations()
if not curAgent:
self.updateCurrentAgent('Di luar area')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
self.currentAgent = curAgent
else:
self.updateCurrentAgent('Belum mendapat sinyal GPS..')
# reset price, distance
self.updateDestinationPriceDistance('---', '---', '---')
def updateCurrentAgent(self, newAgent):
logger.debug('updateCurrentagent: %s' % newAgent)
lcd_string('Agen: %s' % newAgent, 1, 1)
def updateDestinationPriceDistance(self, dest, price, distance):
if dest != '---':
lcd_string("Tujuan: {0} Harga: Rp {1:,}".format(dest, price), 1, 2)
self.currentDestination = dest
self.currentPrice = price
self.currentDistance = distance
else:
lcd_string("Tujuan: --- Harga: ---", 1, 2)
self.currentDestination = None
self.currentPrice = None
self.currentDistance = None
def updateStatus(self, status, showTime):
lcd_string('{0}'.format(status), 1, 3)
self.say(status)
QtCore.QTimer.singleShot(showTime, self.resetStatus)
def resetStatus(self):
lcd_string('', 1, 3)
def speedReceived(self, speed):
lcd_goto(('%s kmh' % speed).ljust(7),0,4)
def sat_infoReceived(self, sat_info):
lcd_goto(('Sat:%s' % sat_info).ljust(6),8,4)
def updateNoTicket(self, ticket_no):
lcd_goto(('Tkt:%s' % ticket_no).ljust(7),15,4)
def updateRouteDisplay(self):
dests = self.route.getDestinationNames()
lcd_goto(('%s->%s' % (dests[0], dests[-1])).ljust(17),23,4)
def getAgentInCurrentLocation(self, gpsPos, agents):
#~ print curPos, agents
for agent in agents:
dist = abs(self.route.distanceTo((gpsPos['lon'], gpsPos['lat']), agents[agent]['latlon']))
#~ print dist
if dist <= (agents[agent]['radius'] / 1000.0):
return agent
return None
def destinationChosen(self, dest_qstring):
dest = str(dest_qstring)
if dest:
if self.currentAgent:
# check if selected destination is valid
if self.route.simpleDistanceTo(config.agents[self.currentAgent]['latlon'], normalized = True) < self.route.simpleDistanceTo(self.route.getDestinations()[dest]['latlon'], normalized = True):
distance = self.route.distanceTo(
config.agents[self.currentAgent]['latlon'],
self.route.getDestinations()[dest]['latlon']
)
price = self.calculatePrice(self.currentAgent, dest, distance)
self.updateDestinationPriceDistance(dest, price, distance)
self.say(dest)
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Tujuan tidak valid', 2000)
else:
self.updateStatus('Di luar agen', 2000)
self.updateDestinationPriceDistance('---', '---', '---')
else:
self.updateDestinationPriceDistance('---', '---', '---')
self.updateStatus('Error pemilihan tujuan', 2000)
def say(self, tosay):
subprocess.call('espeak -vid+f3 "%s" 2>/dev/null &' % tosay, shell=True)
#~ speechThread = SpeechThread(tosay)
#~ speechThread.start()
pass
def calculatePrice(self, fromAgent, destination, distance):
print (fromAgent, destination, distance)
for prices in self.route.getDestinations()[destination]['pricelist']:
if fromAgent in prices['from']:
return prices['price']
#~ return max(config.minimal_price, math.ceil((distance*config.price_per_km)/1000.0) * 1000)
return 0
def printTicket(self):
if self.currentAgent in config.agents:
if self.currentDestination: # if any destination selected
dest = self.currentDestination
# print ticket
#~ try:
if config.printer_enabled:
self.say('Mencetak tiket ke %s' % dest)
gpsdt = self.gpsThread.lastpos['gpsdt']
curdt = datetime.fromtimestamp(gpsdt)
# initialize or increment global ticket counter
if not self.redis.get('discountTicketCounter'):
self.redis.set('discountTicketCounter', 0)
self.redis.incr('discountTicketCounter')
# initialize or increment daily ticket counter
if not self.redis.get(curdt.strftime('%Y%m%d:ticket_no')):
self.redis.set(curdt.strftime('%Y%m%d:ticket_no'), 0)
self.redis.incr(curdt.strftime('%Y%m%d:ticket_no'))
isTicketFree = False
if int(self.redis.get('discountTicketCounter')) >= 100:
isTicketFree = True
self.redis.set('discountTicketCounter', 0)
p = PrinterC1(config.printer_port, 9600)
p.selectFont1(2)
if isTicketFree:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('GRATIS PROMO')
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
else:
p.printString(config.company_name)
p.printString(dest, 2, 4)
p.printString(curdt.strftime('%d-%b-%Y %H:%M' ), 2, 4)
p.printString('Rp {0:,}'.format(self.currentPrice), 2, 4)
p.selectFont1(0)
p.printString( 'Agen: %s' % self.currentAgent)
p.printString('{0:.1f} km'.format(self.currentDistance))
p.printString( 'Bus: %s' % config.bus_plateno)
p.printString( 'Tiket#: %s' % self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
p.printBarcode(2, '%03d%010d' % (config.bus_id, gpsdt))
p.cutPaper(0)
p.close()
#~ print 'PO. Sumber Alam'
#~ print config.bus_plateno
#~ print curdt.strftime('%d-%b-%Y %H:%M', time.localtime(curtime))
#~ print 'Tujuan: {}'.format(destListBox.selectedItem)
#~ print 'Jarak: 0{:.1f} km'.format(distance)
#~ print 'Harga: Rp. {0:.0f}'.format(self.ui.lblPrice.text())
#~ print '%03d%010d' % (config.bus_id, int(curtime))
# initialize or add daily total setoran
if not self.redis.get(curdt.strftime('%Y%m%d:setoran')):
self.redis.set(curdt.strftime('%Y%m%d:setoran'), 0)
self.redis.set(curdt.strftime('%Y%m%d:setoran'), int(self.redis.get(curdt.strftime('%Y%m%d:setoran'))) + self.currentPrice)
self.redis.rpush('mq', '$TIKET%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\r\n' % (
config.bus_plateno,
gpsdt,
self.redis.get(curdt.strftime('%Y%m%d:ticket_no')),
self.redis.get(curdt.strftime('%Y%m%d:setoran')),
self.currentAgent,
dest,
self.currentPrice if not isTicketFree else '0',
'{0:.1f}'.format(self.currentDistance),
self.currentLon,
self.currentLat
))
self.updateNoTicket(self.redis.get(curdt.strftime('%Y%m%d:ticket_no')))
try:
self.redis.save()
except Exception:
e = sys.exc_info()
logger.warning('Error redis.save(), maybe redis is saving, it is OK: %s %s' % (e[0], e[1]))
#~ except Exception as ex:
#~ logger.error('cannot print ticket %s' % ex )
#~ self.ui.statusbar.showMessage('Printer error', 2000)
else:
self.updateStatus('Tujuan belum dipilih', 2000)
else:
# show info that the ticket cannot be printed because we are outside of any agent area
self.updateStatus('Di luar agen', 2000)
def switchDirection(self, direction):
logger.info('Switched direction')
self.updateDestinationPriceDistance('---', '---', '---')
self.route.switchDirection()
self.updateRouteDisplay()
self.say('Ganti arah')
def drawTrackBackground(self):
linePosY = self.scene.height() / 2
# draw horizontal line
self.scene.addLine(self.marginLR, linePosY, self.marginLR + self.linelength, linePosY)
alternateUpDown = True
prevUpRightX = 0
prevDownRightX = 0
wasDownDown = False
wasUpUp = False
# draw agent name labels
for dest in config.agents:
destItem = self.scene.addSimpleText(dest)
distanceDestFactor = self.route.simpleDistanceTo(config.agents[dest]['latlon'], normalized = True)
posLineX = self.marginLR + (distanceDestFactor * self.linelength)
posTextX = posLineX
if (posTextX - int(destItem.boundingRect().width() / 2)) < 0:
# first text
posTextX = 0
elif (posTextX + int(destItem.boundingRect().width() / 2)) > self.scene.width():
posTextX = self.size[0] - destItem.boundingRect().width()
else:
posTextX = posTextX - int(destItem.boundingRect().width() / 2)
posTextY = 0
if alternateUpDown:
# text below line
if wasDownDown or ((posTextX - self.marginLR) >= prevDownRightX):
posTextY = linePosY + 10
wasDownDown = False
else:
posTextY = linePosY + 10 + destItem.boundingRect().height()
wasDownDown = True
else:
# text above line
if wasUpUp or ((posTextX - self.marginLR) >= prevUpRightX):
posTextY= linePosY - (6 + destItem.boundingRect().height())
wasUpUp = False
else:
posTextY= linePosY - (6 + (2 *destItem.boundingRect().height()))
wasUpUp = True
destItem.setPos(posTextX, posTextY)
# draw connecting line from the horizontal line to the label
self.scene.addLine(
posLineX, linePosY,
posLineX, posTextY if alternateUpDown else posTextY + destItem.boundingRect().height() - 4
)
if alternateUpDown:
prevDownRightX = posTextX - self.marginLR + destItem.boundingRect().width()
else:
prevUpRightX = posTextX - self.marginLR + destItem.boundingRect().width()
alternateUpDown = not alternateUpDown
def updateTrackDirection(self):
if self.arrow:
shape = [ (-10, - 8 ), (10, 0), (-10, 8 ), (0, 0) ] if self.route.mode == '>' else [ (10, - 8 ), (-10, 0), (10, 8 ), (0, 0) ]
pol = QtGui.QPolygonF()
for point in shape:
pol.append(QtCore.QPointF(point[0], point[1]))
self.arrow.setPolygon(pol)
else:
self.ui.statusbar.showMessage('Menunggu sinyal GPS...', 2000)
def updateTrackPosition(self, gpsPos):
# draw arrow showing actual bus position
if not self.arrow:
self.arrow = QtGui.QGraphicsPolygonItem()
self.arrow.setBrush(QtCore.Qt.red)
self.arrow.setPen(QtCore.Qt.red)
self.arrow.setVisible(False)
self.scene.addItem(self.arrow)
self.arrow.direction = None
if not self.arrow.direction or (self.arrow.direction != self.route.mode):
self.updateTrackDirection()
self.arrow.direction = self.route.mode
progress = self.route.simpleDistanceTo((gpsPos[QtCore.QString('lon')], gpsPos[QtCore.QString('lat')]), normalized = True)
if self.route.mode == '<':
progress = 1.0 - progress
self.arrow.setPos(self.marginLR+int(self.linelength*progress), self.scene.height() / 2)
self.arrow.setVisible(True)
def main():
app = QtCore.QCoreApplication(sys.argv)
#app.setStyle(QtGui.QStyleFactory.create("plastique"))
ex = MainApp()
#~ ex.show()
#ex.showFullScreen()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| try:
if self.gpsThread.lastpos:
# ITPRO861001000786141,11025.595867,-659.625256,31,20121008035615.000,15,0,13,1,
gprmclon = 100 *(int(self.gpsThread.lastpos['lon']) + ((self.gpsThread.lastpos['lon'] - int(self.gpsThread.lastpos['lon'])) / 100 * 60))
gprmclat = 100 *(int(self.gpsThread.lastpos['lat']) + ((self.gpsThread.lastpos['lat'] - int(self.gpsThread.lastpos['lat'])) / 100 * 60))
gpsmsg = 'ITPRO%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\r\n' % (
config.bus_plateno,
gprmclon,
gprmclat,
self.gpsThread.lastpos['alt'],
time.strftime("%Y%m%d%H%M%S", time.gmtime(self.gpsThread.lastpos['gpsdt'])),
self.gpsThread.lastpos['no_sat'],
self.gpsThread.lastpos['speed'],
self.gpsThread.lastpos['bearing'],
'A',
self.gpsThread.lastpos['ext_power'],
'',
)
logger.debug('SENDGPSINFO: %s' % gpsmsg)
self.redis.rpush('mq', gpsmsg)
else:
logger.info('SENDGPSINFO: GPS not set, not sending position to server..')
except Exception:
e = sys.exc_info()
logger.error('SENDGPSINFO: Error sending GPS info: %s %s' % (e[0], e[1])) | identifier_body |
lp.go | // Copyright 2017 Fabian Wenzelmann <[email protected]>, Christian Schilling,
// Jan-Georg Smaus
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lpb
import (
"errors"
"fmt"
"math"
"sort"
"sync"
br "github.com/FabianWe/boolrecognition"
"github.com/draffensperger/golp"
)
// debug is used to panic in some conditions, if tested properly set to false.
const debug = true
// DNFTreeNodeContent is a node in the tree we construct for the regularity
// test. Each node stores a DNF, the information if that DNF is final (i.e.
// true or false), its depth and two children.
//
// The children are stored by ID; the tree stores a list of all nodes
// and can therefore retrieve the actual node.
type DNFTreeNodeContent struct {
phi br.ClauseSet
leftChild, rightChild int
final bool
depth int
}
// A DNFTree is a collection of DNFTreeNodeContent objects.
// The root node is stored at position 0.
type DNFTree struct {
Content []*DNFTreeNodeContent
Nbvar int
}
// NewDNFTree returns an empty tree containing no nodes.
func NewDNFTree(nbvar int) *DNFTree {
return &DNFTree{Content: nil, Nbvar: nbvar}
}
// CreateNodeEntry creates a new node given its DNF, depth and the information
// if that DNF is final.
//
// It will append the new node to the tree and return the index of the new node.
func (tree *DNFTree) CreateNodeEntry(phi br.ClauseSet, depth int, isFinal bool) int {
n := &DNFTreeNodeContent{phi, -1, -1, isFinal, depth}
tree.Content = append(tree.Content, n)
return len(tree.Content) - 1
}
// CreateRoot creates a root node and returns its ID (should be 0).
func (tree *DNFTree) CreateRoot(phi br.ClauseSet, isFinal bool) int {
return tree.CreateNodeEntry(phi, 0, isFinal)
}
// CreateLeftChild creates a new node and sets the left child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateLeftChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateLeftChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateLeftChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.leftChild = id
return id
}
// CreateRightChild creates a new node and sets the right child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateRightChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateRightChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateRightChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.rightChild = id
return id
}
// IsLeaf checks if the node is a leaf (has no child nodes).
func (tree *DNFTree) IsLeaf(nodeID int) bool {
n := tree.Content[nodeID]
return n.leftChild < 0 && n.rightChild < 0
}
type LPSplitResult struct {
Final bool
Phi br.ClauseSet
}
func (tree *DNFTree) Split(nodeID int) (*LPSplitResult, *LPSplitResult) {
n := tree.Content[nodeID]
variable := n.depth
first, second := br.NewClauseSet(len(n.phi)), br.NewClauseSet(len(n.phi))
for _, clause := range n.phi {
// check if the variable is contained
if len(clause) > 0 && clause[0] == variable {
// remove variable and add to the first result
first = append(first, clause[1:])
} else {
// variable not contained, so just add the complete clause
second = append(second, clause)
}
}
isFirstFinal, isSecondFinal := isFinal(first) != NotFinal, isFinal(second) != NotFinal
return &LPSplitResult{isFirstFinal, first}, &LPSplitResult{isSecondFinal, second}
}
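// exampleSplit is an illustrative sketch and not part of the original code:
// splitting the root DNF (x0 ∧ x1) ∨ x2 on variable 0 (the root's depth) is
// expected to yield the cofactor {x1} (clauses containing x0, with x0 removed)
// and the remainder {x2} (clauses not containing x0).
func exampleSplit() (*LPSplitResult, *LPSplitResult) {
	phi := br.ClauseSet{br.Clause{0, 1}, br.Clause{2}}
	tree := NewDNFTree(3)
	rootID := tree.CreateRoot(phi, false)
	return tree.Split(rootID)
}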
// BuildTree will build the whole tree. The root node must be set already.
func (tree *DNFTree) BuildTree() {
if debug {
if len(tree.Content) != 1 {
panic("Expected a tree containing exactly one node (the root) in BuildTree")
}
}
if tree.Content[0].final {
// for true and false there is nothing to do
return
}
// create a queue that stores the node ids that must be explored
// add first node (root) to it
waiting := []int{0}
for len(waiting) != 0 {
nextID := waiting[0]
waiting = waiting[1:]
next := tree.Content[nextID]
if next.final {
// no splitting required for final node
continue
}
// split the node
first, second := tree.Split(nextID)
if first.Final {
if len(first.Phi) != 0 {
leftID := tree.CreateLeftChild(nextID, first.Phi, true)
waiting = append(waiting, leftID)
}
// TODO why only in this case?
} else {
leftID := tree.CreateLeftChild(nextID, first.Phi, false)
waiting = append(waiting, leftID)
}
if second.Final {
if len(second.Phi) != 0 {
rightID := tree.CreateRightChild(nextID, second.Phi, true)
waiting = append(waiting, rightID)
}
} else {
rightID := tree.CreateRightChild(nextID, second.Phi, false)
waiting = append(waiting, rightID)
}
}
}
func (tree *DNFTree) IsImplicant(mtp br.BooleanVector) bool {
uID := 0
for k := 0; k < len(mtp); k++ {
u := tree.Content[uID]
if tree.IsLeaf(uID) {
return true
}
leftChild, rightChild := u.leftChild, u.rightChild
if mtp[k] {
if leftChild >= 0 {
uID = leftChild
continue
} else {
if debug {
if rightChild < 0 {
panic("rightChild must not be nil in IsImplicant")
}
}
uID = rightChild
}
} else {
if rightChild >= 0 {
uID = rightChild
continue
} else {
return false
}
}
}
if debug {
if !(tree.Content[uID].leftChild < 0 && tree.Content[uID].rightChild < 0) {
panic("rightChild and leftChild must be nil in IsImplicant")
}
}
return true
}
func (tree *DNFTree) IsRegular(mtps []br.BooleanVector) bool {
numRuns := tree.Nbvar - 1
res := true
// we will do this concurrently:
// for each mtp iterate over all variable combinations and perform the test
// and write the result to a channel
// this also has some drawback: we need to wait for all mtps to finish
// otherwise we would need some context which would be too much here
// so they all must write a result, even if one already returns false...
report := make(chan bool, 10)
// channel to report once we read all results
done := make(chan bool)
go func() {
for i := 0; i < len(mtps); i++ {
nxt := <-report
if !nxt {
res = false
}
}
done <- true
}()
for k := 0; k < len(mtps); k++ {
go func(index int) {
mtp := mtps[index]
check := true
for i := 0; i < numRuns; i++ {
if (!mtp[i]) && (mtp[i+1]) {
// change the positions in the point, after the implicant test
// we will change them again
mtp[i] = true
mtp[i+1] = false
isImplicant := tree.IsImplicant(mtp)
mtp[i] = false
mtp[i+1] = true
if !isImplicant {
check = false
break
}
}
}
report <- check
}(k)
}
// wait until all results are there
<-done
return res
}
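// exampleRegularityCheck is an illustrative sketch and not part of the original
// code: it builds the DNF tree for the majority function on three variables,
// x0x1 ∨ x0x2 ∨ x1x2, and runs the regularity test on its minimal true points.
// Since majority is symmetric, IsRegular is expected to return true here.
func exampleRegularityCheck() bool {
	phi := br.ClauseSet{br.Clause{0, 1}, br.Clause{0, 2}, br.Clause{1, 2}}
	tree := NewDNFTree(3)
	tree.CreateRoot(phi, isFinal(phi) != NotFinal)
	tree.BuildTree()
	return tree.IsRegular(ComputeMTPs(phi, 3))
}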
// TightenMode describes different modes to tighten the linear program
// before solving it.
//
// There are three different modes described below.
type TightenMode int
const (
TightenNone TightenMode = iota // Add only constraints necessary for solving the problem
TightenNeighbours // Add also constraints between variables x(i) and x(i + 1)
TightenAll // Add additional constraints between all variable pairs
)
type LinearProgram struct {
Renaming, ReverseRenaming []int
Tree *DNFTree
Winder br.WinderMatrix
LP *golp.LP
MFPs, MTPs []br.BooleanVector
Phi br.ClauseSet
Nbvar int
}
// NewLinearProgram creates a new lp given the DNF ϕ.
//
// It will however not create the actual program or the tree; this must be done
// somewhere else. It only creates the root node.
//
// Important note: For our algorithm to work the variables must be sorted
// according to their importance. Since this is not always the case (only
// during testing and some very special cases) this method will do this for
// you, i.e. it will create the winder matrix and then rename all
// variables accordingly. So the DNF we store in the root node is the
// renamed DNF. But we also store the mapping that caused this renaming
// in the field Renaming. This slice stores for each "old" variable
// the id in the new tree, i.e. a lookup tree.Renaming[id] gives you the
// id of the variable in the new tree.
// The reverse mapping, i.e. new variable → old variable is stored in
// ReverseRenaming.
//
// If you don't need the renaming set sortMatrix to false, in this case
// the matrix will work properly but the variables don't get sorted.
// That is only set it to false if you know that the ordering of the variables
// is already correct.
// Renaming and ReverseRenaming will be set to nil in this case.
//
// Also the clauses in the DNF must be sorted in increasing order.
// If you don't want the clauses to get sorted set sortClauses to false.
// Of course this only makes sense if also sortMatrix is set to false,
// otherwise the new dnf might not be sorted.
// This functions will sort them in this case nonetheless.
//
// The variables in the DNF have to be 0 <= v < nbvar (so nbvar must be correct
// and variables start with 0).
// Also each variable should appear at least once in the DNF, what happens
// otherwise is not tested yet.
func NewLinearProgram(phi br.ClauseSet, nbvar int, sortMatrix, sortClauses bool) *LinearProgram {
tree := NewDNFTree(nbvar)
newDNF, winder, renaming, reverseRenaming := InitLP(phi, nbvar, sortMatrix)
if sortMatrix || sortClauses {
newDNF.SortAll()
}
dnfType := isFinal(newDNF)
rootID := tree.CreateRoot(newDNF, dnfType != NotFinal)
if debug {
if rootID != 0 {
panic("Expected root id to be 0, in NewLinearProgram")
}
}
return &LinearProgram{Renaming: renaming,
ReverseRenaming: reverseRenaming,
Tree: tree,
Winder: winder,
LP: nil,
MFPs: nil,
MTPs: nil,
Phi: newDNF,
Nbvar: nbvar,
}
}
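// exampleRenamingLookup is a small sketch (not part of the original sources)
// showing how Renaming and ReverseRenaming are intended to be used after
// NewLinearProgram was called with sortMatrix set to true. With sortMatrix set
// to false both slices are nil and this lookup must be skipped.
func exampleRenamingLookup(lp *LinearProgram, oldID int) (newID, back int) {
	// id of the old variable in the renamed (sorted) DNF
	newID = lp.Renaming[oldID]
	// ReverseRenaming undoes the renaming, so back == oldID is expected to hold
	back = lp.ReverseRenaming[newID]
	return newID, back
}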
// InitLP initializes the lp, that is it creates the Winder matrix for
// (the renamed) ϕ.
// It will also compute Renaming and ReverseRenaming as discussed in
// NewLinearProgram.
//
// It returns first the renamedDNF, the Winder matrix, then Renaming and then
// ReverseRenaming.
// If sortMatrix is false the old dnf will be returned.
func InitLP(phi br.ClauseSet, nbvar int, sortMatrix bool) (br.ClauseSet, br.WinderMatrix, []int, []int) {
newDNF := phi
var renaming, reverseRenaming []int = nil, nil
winder := br.NewWinderMatrix(phi, nbvar, true)
if sortMatrix {
renaming = make([]int, nbvar)
reverseRenaming = make([]int, nbvar)
// sort the matrix
winder.Sort()
// create the renaming
for newVariableId, row := range winder {
| ewDNF = make([]br.Clause, len(phi))
// clone each clause
// we'll do that concurrently
var wg sync.WaitGroup
wg.Add(len(phi))
for i := 0; i < len(phi); i++ {
go func(index int) {
clause := phi[index]
var newClause br.Clause = make([]int, len(clause))
for j, oldID := range clause {
newClause[j] = renaming[oldID]
}
newDNF[index] = newClause
wg.Done()
}(i)
}
wg.Wait()
}
return newDNF, winder, renaming, reverseRenaming
}
func (lp LinearProgram) Solve(tighten TightenMode, regTest bool) (*LPB, error) {
// create minimal true points
mtps := ComputeMTPs(lp.Phi, lp.Nbvar)
lp.MTPs = mtps
// if regularity test should be beformed create the DNF tree
if regTest {
lp.Tree.BuildTree()
if !lp.Tree.IsRegular(mtps) {
return nil, errors.New("DNF is not regular")
}
}
// compute maximal false points
mfps := ComputeMFPs(mtps, true)
lp.MFPs = mfps
// setup the linear program
program, setupErr := FormulateLP(mtps, mfps, lp.Nbvar, lp.Winder, tighten)
if setupErr != nil {
return nil, setupErr
}
lp.LP = program
// try to convert it
return SolveLP(program)
}
// ComputeMTPs computes the set of minimal true points of a minimal ϕ.
// Since ϕ is minimal this is easy: Each clause defines exactly one minimal
// true point.
func ComputeMTPs(phi br.ClauseSet, nbvar int) []br.BooleanVector {
res := make([]br.BooleanVector, len(phi))
for i, clause := range phi {
point := br.NewBooleanVector(nbvar)
res[i] = point
for _, v := range clause {
point[v] = true
}
}
return res
}
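// exampleComputeMTPs is an illustrative sketch (an assumption, not original
// code): for the minimal DNF (x0 ∧ x1) ∨ x2 over three variables each clause
// yields exactly one minimal true point, namely (1,1,0) and (0,0,1).
func exampleComputeMTPs() []br.BooleanVector {
	phi := br.ClauseSet{br.Clause{0, 1}, br.Clause{2}}
	return ComputeMTPs(phi, 3)
}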
// TODO test this with 0, I don't know what happens to the wait group
// otherwise, or just never call it with a DNF with zero clauses
func ComputeMFPs(mtps []br.BooleanVector, sortPoints bool) []br.BooleanVector {
// first sort the mtps
if sortPoints {
cmp := func(i, j int) bool {
p1, p2 := mtps[i], mtps[j]
if debug {
if len(p1) != len(p2) {
panic("MTPS must be of same length in ComputeMFPs")
}
}
size := len(p1)
for k := 0; k < size; k++ {
val1, val2 := p1[k], p2[k]
if (!val1) && val2 {
return true
} else if val1 && (!val2) {
return false
}
}
if debug {
panic("Must not reach this state in ComputeMFPs")
}
return false
}
sort.Slice(mtps, cmp)
}
// compute nu, we do this concurrently
var wg sync.WaitGroup
wg.Add(len(mtps) - 1)
nu := make([]int, len(mtps))
for i := 1; i < len(mtps); i++ {
go func(index int) {
vars := len(mtps[index])
for j := 0; j < vars; j++ {
val1 := mtps[index-1][j]
val2 := mtps[index][j]
if (!val1) && val2 {
nu[index] = j + 1
break
}
}
wg.Done()
}(i)
}
wg.Wait()
// create the actual points, again we do that concurrently and communicate
// via a channel
// we range over that channel so we must not forget to close it!
res := make([]br.BooleanVector, 0, 10)
// start a function that listens on the channel and adds all points to the
// result
// we use a done channel to signal when all points have been added
resChan := make(chan br.BooleanVector, 10)
done := make(chan bool)
go func() {
for point := range resChan {
res = append(res, point)
}
done <- true
}()
// in the wait group we wait until for all i we've added all points
// after all points were written to the channel we close the channel and then
// wait until they have been added to result
wg.Add(len(mtps))
for i := 0; i < len(mtps); i++ {
go func(index int) {
point := mtps[index]
vars := len(point)
for j := nu[index]; j < vars; j++ {
if point[j] {
if debug {
if nu[index] > j {
panic("nu[i] must be <= j in ComputeMFPs")
}
}
newPoint := point.Clone()
newPoint[j] = false
for k := j + 1; k < vars; k++ {
newPoint[k] = true
}
resChan <- newPoint
}
}
wg.Done()
}(i)
}
wg.Wait()
close(resChan)
// now wait until all points were added to res
<-done
return res
}
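// exampleComputeMFPs is an illustrative sketch and not part of the original
// code: the DNF x0 ∨ (x1 ∧ x2) is regular in this variable order, its minimal
// true points are (1,0,0) and (0,1,1), and the construction above is expected
// to yield the maximal false points (0,0,1) and (0,1,0).
func exampleComputeMFPs() []br.BooleanVector {
	mtps := make([]br.BooleanVector, 2)
	for i := range mtps {
		mtps[i] = br.NewBooleanVector(3)
	}
	// (1,0,0) is the minimal true point of the clause x0
	mtps[0][0] = true
	// (0,1,1) is the minimal true point of the clause x1 ∧ x2
	mtps[1][1], mtps[1][2] = true, true
	return ComputeMFPs(mtps, true)
}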
// FormulateLP will formulate the linear program to solve.
// It will set the following constraings:
// 1. All variables must be of type int (note that this is really bad for
// the runtime of lpsolve)
// 2. For each minimal true point with true variables a1, ..., ak a constraint
// that says that the sum of these variables must be ≥ the degree,
// that is we transform the problem a bit and get:
// a1 + ... + ak ≥ d ⇔ a1 + ... + ak - d ≥ 0
// 3. For each maximal false point with true variables a1, ..., ak a constraint
// that says that the sum of these variables must be < the degree:
// a1 + ... + ak < d
// because lpsolve only allows ≤ we transform this to
// a1 + ... + ak ≤ d - 1 ⇔ a1 + ... + ak - d ≤ -1
//
// The additional constraints depend on the mode:
// If mode is TightenNeighbours we compare all variables w(i) and w(i+1).
// We know that it must always hold that w(i) ≥ w(i+1), but it could also
// be w(i) = w(i+1), we find that out by comparing the Winder matrix entries.
// So we have w(i) ≥ w(i+1) ⇔ w(i) - w(i+1) >= 0 or w(i) - w(i+1) = 0.
// TODO we can make this easily concurrent
func FormulateLP(mtps, mfps []br.BooleanVector, nbvar int, winder br.WinderMatrix, tighten TightenMode) (*golp.LP, error) {
// go uses zero based ids, so all variables have ids between 0 and nbvar -1
// the degree has id nbvar
degreeID := nbvar
lp := golp.NewLP(0, nbvar+1)
// set int constraing on all variables
for column := 0; column < nbvar+1; column++ {
lp.SetInt(column, true)
}
for _, mtp := range mtps {
// now add the constraint
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mtp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
// add the row
if err := lp.AddConstraintSparse(row, golp.GE, 0); err != nil {
return nil, err
}
}
for _, mfp := range mfps {
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mfp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
if err := lp.AddConstraintSparse(row, golp.LE, -1); err != nil {
return nil, err
}
}
// now we add additional constraints, depending on the mode
switch {
case tighten == TightenNeighbours:
// add a constraint for neighbouring variables
// we already know that w(i) ≥ w(i+1), but we could already conclude
// that they must be equal
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
for i := 1; i < nbvar; i++ {
// compare both rows
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
var constraint golp.ConstraintType = golp.GE
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
if compRes == 0 {
constraint = golp.EQ
}
// now update the row and add the constraint
entry1.Col = i - 1
entry2.Col = i
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, constraint, 0); err != nil {
return nil, err
}
}
case tighten == TightenAll && nbvar > 0:
// first we will compare each entry i with i+1 and save the comparison
// result between i and i + 1
// we make use of the transitivity of the comparison and later we don't
// have to compare matrix rows again.
// To add all pairs between i and j where i < j we simply have to lookup
// the precomputed results: as long as the comparison result is = the
// variables must be equal, after that only ≥
// TODO would be nice if someone checked this... really confusing
// with all this index stuff ;)
precomputed := make([]int, nbvar-1)
for i := 1; i < nbvar; i++ {
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
precomputed[i-1] = compRes
}
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
// now add all variable pair results
for i := 0; i < nbvar; i++ {
entry1.Col = i
// first find the longest sequence s.t. the variables are equal
j := i + 1
// loop as long as j is equivalent to its predecessor
// as long as this is the case i is equal to j
for ; j < nbvar && precomputed[j-1] == 0; j++ {
// add eq constraing
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.EQ, 0); err != nil {
return nil, err
}
}
// for all remaining j simply add ≥ constraint
for ; j < nbvar; j++ {
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.GE, 0); err != nil {
return nil, err
}
}
}
}
// TODO some objective must be set
// I only came up with this, however this will also keep the Coefficients
// small...
obj := make([]float64, nbvar+1)
// for i := 0; i < nbvar+1; i++ {
// obj[i] = 1.0
// }
obj[0] = 0.0
lp.SetObjFn(obj)
return lp, nil
}
// TODO only call if there is at least one variable
func SolveLP(lp *golp.LP) (*LPB, error) {
lp.SetVerboseLevel(golp.CRITICAL)
convRes := lp.Solve()
// TODO I've added suboptimal, this should be ok as well?
// TODO seems the constants in golp are wrong...
// we should use them but it's broken :(
if convRes != golp.OPTIMAL && convRes != golp.SUBOPTIMAL {
return nil, fmt.Errorf("Can't solve linear program, lpsolve solution type is %v", convRes)
}
vars := lp.Variables()
coeffs := make([]LPBCoeff, len(vars)-1)
for i, asFloat := range vars[:len(vars)-1] {
// just to be sure
asFloat = math.Floor(asFloat)
coeff := LPBCoeff(asFloat)
coeffs[i] = coeff
}
threshold := LPBCoeff(vars[len(vars)-1])
return NewLPB(threshold, coeffs), nil
}
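// exampleSolveMajority is an end-to-end sketch of the low-level pipeline and
// not part of the original code: minimal true points, maximal false points,
// LP formulation and solving for the majority function on three variables.
// The result is expected to be equivalent to x0 + x1 + x2 >= 2, although the
// concrete coefficients depend on lpsolve. Note that majority is symmetric, so
// the variables are already ordered by importance; for general DNFs use
// NewLinearProgram or LPSolver, which take care of sorting and renaming.
func exampleSolveMajority() (*LPB, error) {
	phi := br.ClauseSet{br.Clause{0, 1}, br.Clause{0, 2}, br.Clause{1, 2}}
	nbvar := 3
	mtps := ComputeMTPs(phi, nbvar)
	mfps := ComputeMFPs(mtps, true)
	winder := br.NewWinderMatrix(phi, nbvar, true)
	lp, err := FormulateLP(mtps, mfps, nbvar, winder, TightenNeighbours)
	if err != nil {
		return nil, err
	}
	return SolveLP(lp)
}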
// LPSolver implements the DNFToLPB interface by using the linear programming
// algorithm.
//
// There are some options you can change, see NewLPSolver, NewLinearProgram and
// LinearProgram.Solve for more details.
//
// It will also rename the variables in the LPB again, that is if the variables
// were renamed for our algorithm to work the resulting LPB is renamed back
// correctly.
type LPSolver struct {
SortMatrix, SortClauses, RegTest bool
Tighten TightenMode
}
// NewLPSolver returns a new LPSolver with SortMatrix, SortClauses and RegTest
// set to true.
//
// For details of these variables see NewLinearProgram and LinearProgram.Solve.
//
// tighten describes how many additional constraints should be added to the
// lp, see TightenMode documentation for more details.
func NewLPSolver(tighten TightenMode) *LPSolver {
return &LPSolver{SortMatrix: true,
SortClauses: true,
RegTest: true,
Tighten: tighten,
}
}
// Convert does everything required to compute the LPB: It sets up the program
// and tries to solve it.
// It will also undo the renaming if required.
func (s *LPSolver) Convert(phi br.ClauseSet, nbvar int) (*LPB, error) {
lp := NewLinearProgram(phi, nbvar, s.SortMatrix, s.SortClauses)
res, err := lp.Solve(s.Tighten, s.RegTest)
if err != nil {
return nil, err
}
// undo renaming
return res.Rename(lp.ReverseRenaming), nil
}
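// exampleConvert is a usage sketch (an assumption, not part of the original
// sources) for the high-level interface: it converts the regular DNF
// x0 ∨ (x1 ∧ x2) into an LPB. The expected result is equivalent to
// 2*x0 + x1 + x2 >= 2, though the concrete coefficients depend on lpsolve.
func exampleConvert() (*LPB, error) {
	phi := br.ClauseSet{br.Clause{0}, br.Clause{1, 2}}
	solver := NewLPSolver(TightenNeighbours)
	return solver.Convert(phi, 3)
}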
| renaming[row[len(row)-1]] = newVariableId
reverseRenaming[newVariableId] = row[len(row)-1]
}
n | conditional_block |
lp.go | // Copyright 2017 Fabian Wenzelmann <[email protected]>, Christian Schilling,
// Jan-Georg Smaus
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lpb
import (
"errors"
"fmt"
"math"
"sort"
"sync"
br "github.com/FabianWe/boolrecognition"
"github.com/draffensperger/golp"
)
// debug is used to panic in some conditions, if tested properly set to false.
const debug = true
// DNFTreeNodeContent is a node in the tree we construct for the regularity
// test. Each node stores a DNF, the information if that DNF is final (i.e.
// true or false), its depth and two children.
//
// The children are stored by ID; the tree stores a list of all nodes
// and can therefore retrieve the actual node.
type DNFTreeNodeContent struct {
phi br.ClauseSet
leftChild, rightChild int
final bool
depth int
}
// A DNFTree is a collection of DNFTreeNodeContent objects.
// The root node is stored at position 0.
type DNFTree struct {
Content []*DNFTreeNodeContent
Nbvar int
}
// NewDNFTree returns an empty tree containing no nodes.
func NewDNFTree(nbvar int) *DNFTree {
return &DNFTree{Content: nil, Nbvar: nbvar}
}
// CreateNodeEntry creates a new node given its DNF, depth and the information
// if that DNF is final.
//
// It will append the new node to the tree and return the index of the new node.
func (tree *DNFTree) CreateNodeEntry(phi br.ClauseSet, depth int, isFinal bool) int {
n := &DNFTreeNodeContent{phi, -1, -1, isFinal, depth}
tree.Content = append(tree.Content, n)
return len(tree.Content) - 1
}
// CreateRoot creates a root node and returns its ID (should be 0).
func (tree *DNFTree) CreateRoot(phi br.ClauseSet, isFinal bool) int {
return tree.CreateNodeEntry(phi, 0, isFinal)
}
// CreateLeftChild creates a new node and sets the left child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateLeftChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateLeftChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateLeftChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.leftChild = id
return id
}
// CreateRightChild creates a new node and sets the right child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateRightChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateRightChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateRightChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.rightChild = id
return id
}
// IsLeaf checks if the node is a leaf (has no child nodes).
func (tree *DNFTree) IsLeaf(nodeID int) bool {
n := tree.Content[nodeID]
return n.leftChild < 0 && n.rightChild < 0
}
type LPSplitResult struct {
Final bool
Phi br.ClauseSet
}
func (tree *DNFTree) Split(nodeID int) (*LPSplitResult, *LPSplitResult) {
n := tree.Content[nodeID]
variable := n.depth
first, second := br.NewClauseSet(len(n.phi)), br.NewClauseSet(len(n.phi))
for _, clause := range n.phi {
// check if the variable is contained
if len(clause) > 0 && clause[0] == variable {
// remove variable and add to the first result
first = append(first, clause[1:])
} else {
// variable not contained, so just add the complete clause
second = append(second, clause)
}
}
isFirstFinal, isSecondFinal := isFinal(first) != NotFinal, isFinal(second) != NotFinal
return &LPSplitResult{isFirstFinal, first}, &LPSplitResult{isSecondFinal, second}
}
// BuildTree will build the whole tree. The root node must be set already.
func (tree *DNFTree) BuildTree() {
if debug {
if len(tree.Content) != 1 {
panic("Expected a tree containing exactly one node (the root) in BuildTree")
}
}
if tree.Content[0].final {
// for true and false there is nothing to do
return
}
// create a queue that stores the node ids that must be explored
// add first node (root) to it
waiting := []int{0}
for len(waiting) != 0 {
nextID := waiting[0]
waiting = waiting[1:]
next := tree.Content[nextID]
if next.final {
// no splitting required for final node
continue
}
// split the node
first, second := tree.Split(nextID)
if first.Final {
if len(first.Phi) != 0 {
leftID := tree.CreateLeftChild(nextID, first.Phi, true)
waiting = append(waiting, leftID)
}
// TODO why only in this case?
} else {
leftID := tree.CreateLeftChild(nextID, first.Phi, false)
waiting = append(waiting, leftID)
}
if second.Final {
if len(second.Phi) != 0 {
rightID := tree.CreateRightChild(nextID, second.Phi, true)
waiting = append(waiting, rightID)
}
} else {
rightID := tree.CreateRightChild(nextID, second.Phi, false)
waiting = append(waiting, rightID)
}
}
}
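// IsImplicant follows the point mtp through the tree: at a true position it
// prefers the left child (the branch where the variable was removed), at a
// false position it must follow the right child. It returns true once a leaf
// is reached and false if a false position has no right child to follow.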
func (tree *DNFTree) IsImplicant(mtp br.BooleanVector) bool {
uID := 0
for k := 0; k < len(mtp); k++ {
u := tree.Content[uID]
if tree.IsLeaf(uID) {
return true
}
leftChild, rightChild := u.leftChild, u.rightChild
if mtp[k] {
if leftChild >= 0 {
uID = leftChild
continue
} else {
if debug {
if rightChild < 0 {
panic("rightChild must not be nil in IsImplicant")
}
}
uID = rightChild
}
} else {
if rightChild >= 0 {
uID = rightChild
continue
} else {
return false
}
}
}
if debug {
if !(tree.Content[uID].leftChild < 0 && tree.Content[uID].rightChild < 0) {
panic("rightChild and leftChild must be nil in IsImplicant")
}
}
return true
}
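// IsRegular checks the regularity of the DNF represented by the tree: for
// every minimal true point and every neighbouring pair of variables i, i+1
// with mtp[i] == false and mtp[i+1] == true, swapping the two values must
// again yield an implicant. Each mtp is tested in its own goroutine.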
func (tree *DNFTree) IsRegular(mtps []br.BooleanVector) bool {
numRuns := tree.Nbvar - 1
res := true
// we will do this concurrently:
// for each mtp iterate over all variable combinations and perform the test
// and write the result to a channel
// this also has some drawback: we need to wait for all mtps to finish
// otherwise we would need some context, which would be too much here
// so they all must write a result, even if one already returns false...
report := make(chan bool, 10)
// channel to report once we read all results
done := make(chan bool)
go func() {
for i := 0; i < len(mtps); i++ {
nxt := <-report
if !nxt {
res = false
}
}
done <- true
}()
for k := 0; k < len(mtps); k++ {
go func(index int) {
mtp := mtps[index]
check := true
for i := 0; i < numRuns; i++ {
if (!mtp[i]) && (mtp[i+1]) {
// change the positions in the point, after the implicant test
// we will change them again
mtp[i] = true
mtp[i+1] = false
isImplicant := tree.IsImplicant(mtp)
mtp[i] = false
mtp[i+1] = true
if !isImplicant {
check = false
break
}
}
}
report <- check
}(k)
}
// wait until all results are there
<-done
return res
}
// TightenMode describes different modes to tighten the linear program
// before solving it.
//
// There are three different modes described below.
type TightenMode int
const (
TightenNone TightenMode = iota // Add only constraints necessary for solving the problem
TightenNeighbours // Add also constraints between variables x(i) and x(i + 1)
TightenAll // Add additional constraints between all variable pairs
)
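// LinearProgram bundles everything needed to convert a DNF into an LPB: the
// (possibly renamed) DNF, the renaming and its inverse, the DNF tree used for
// the regularity test, the Winder matrix, the minimal true points, the
// maximal false points and the lpsolve program itself.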
type LinearProgram struct {
Renaming, ReverseRenaming []int
Tree *DNFTree
Winder br.WinderMatrix
LP *golp.LP
MFPs, MTPs []br.BooleanVector
Phi br.ClauseSet
Nbvar int
}
// NewLinearProgram creates a new lp given the DNF ϕ.
//
// It will however not create the actual program or the tree, this must be done
// somewhere else, it only creates the root node.
//
// Important note: For our algorithm to work the variables must be sorted
// according to their importance. Since this is not always the case (only
// during testing and some very special cases) this method will do this for
// you, i.e. it will create the winder matrix and then rename all
// variables accordingly. So the DNF we store in the root node is the
// renamed DNF. But we also store the mapping that caused this renaming
// in the field Renaming. This slice stores for each "old" variable
// the id in the new tree, i.e. a lookup tree.Renaming[id] gives you the
// id of the variable in the new tree.
// The reverse mapping, i.e. new variable → old variable is stored in
// ReverseRenaming.
//
// If you don't need the renaming, set sortMatrix to false; in this case
// the matrix will still work properly but the variables don't get sorted.
// That is, only set it to false if you know that the ordering of the variables
// is already correct.
// Renaming and ReverseRenaming will be set to nil in this case.
//
// Also the clauses in the DNF must be sorted in increasing order.
// If you don't want the clauses to get sorted set sortClauses to false.
// Of course this only makes sense if sortMatrix is also set to false,
// otherwise the renamed DNF might not be sorted; this function will sort
// the clauses in that case nonetheless.
//
// The variables in the DNF have to be 0 <= v < nbvar (so nbvar must be correct
// and variables start with 0).
// Also each variable should appear at least once in the DNF, what happens
// otherwise is not tested yet.
func NewLinearProgram(phi br.ClauseSet, nbvar int, sortMatrix, sortClauses bool) *LinearProgram {
tree := NewDNFTree(nbvar)
newDNF, winder, renaming, reverseRenaming := InitLP(phi, nbvar, sortMatrix)
if sortMatrix || sortClauses {
newDNF.SortAll()
}
dnfType := isFinal(newDNF)
rootID := tree.CreateRoot(newDNF, dnfType != NotFinal)
if debug {
if rootID != 0 {
panic("Expected root id to be 0, in NewLinearProgram")
}
}
return &LinearProgram{Renaming: renaming,
ReverseRenaming: reverseRenaming,
Tree: tree,
Winder: winder,
LP: nil,
MFPs: nil,
MTPs: nil,
Phi: newDNF,
Nbvar: nbvar,
}
}
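// The renaming can be pictured with a small, made-up example (the concrete
// order of course depends on the Winder matrix of the input):
//
// nbvar = 3; assume sorting ranks the old variables 2, 0, 1 by importance,
// then Renaming = [1, 2, 0] (old id → new id) and
// ReverseRenaming = [2, 0, 1] (new id → old id),
// so the clause {0, 2} is stored as {1, 0} in the root DNF (before sorting).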
// InitLP initializes the lp, that is it creates the Winder matrix for
// (the renamed) ϕ.
// It will also compute Renaming and ReverseRenaming as discussed in
// NewLinearProgram.
//
// It returns first the renamedDNF, the Winder matrix, then Renaming and then
// ReverseRenaming.
// If sortMatrix is false the old dnf will be returned.
func InitLP(phi br.ClauseSet, nbvar int, sortMatrix bool) (br.ClauseSet, br.WinderMatrix, []int, []int) {
newDNF := phi
var renaming, reverseRenaming []int = nil, nil
winder := br.NewWinderMatrix(phi, nbvar, true)
if sortMatrix {
renaming = make([]int, nbvar)
reverseRenaming = make([]int, nbvar)
// sort the matrix
winder.Sort()
// create the renaming
for newVariableId, row := range winder {
renaming[row[len(row)-1]] = newVariableId
reverseRenaming[newVariableId] = row[len(row)-1]
}
newDNF = make([]br.Clause, len(phi))
// clone each clause
// we'll do that concurrently
var wg sync.WaitGroup
wg.Add(len(phi))
for i := 0; i < len(phi); i++ {
go func(index int) {
clause := phi[index]
var newClause br.Clause = make([]int, len(clause))
for j, oldID := range clause {
newClause[j] = renaming[oldID]
}
newDNF[index] = newClause
wg.Done()
}(i)
}
wg.Wait()
}
return newDNF, winder, renaming, reverseRenaming
}
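// Solve runs the whole pipeline on the stored DNF: it computes the minimal
// true points, optionally builds the tree and performs the regularity test,
// computes the maximal false points, formulates the linear program and
// finally tries to solve it, returning the resulting LPB.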
func (lp LinearProgram) Solve(tighten TightenMode, regTest bool) (*LPB, error) {
// create minimal true points
mtps := ComputeMTPs(lp.Phi, lp.Nbvar)
lp.MTPs = mtps
// if the regularity test should be performed create the DNF tree
if regTest {
lp.Tree.BuildTree()
if !lp.Tree.IsRegular(mtps) {
return nil, errors.New("DNF is not regular")
}
}
// compute maximal false points
mfps := ComputeMFPs(mtps, true)
lp.MFPs = mfps
// setup the linear program
program, setupErr := FormulateLP(mtps, mfps, lp.Nbvar, lp.Winder, tighten)
if setupErr != nil {
return nil, setupErr
}
lp.LP = program
// try to convert it
return SolveLP(program)
}
// ComputeMTPs computes the set of minimal true points of a minimal ϕ.
// Since ϕ is minimal this is easy: Each clause defines exactly one minimal
// true point.
func ComputeMTPs(phi br.ClauseSet, nbvar int) []br.BooleanVector {
res := make([]br.BooleanVector, len(phi))
for i, clause := range phi {
point := br.NewBooleanVector(nbvar)
res[i] = point
for _, v := range clause {
point[v] = true
}
}
return res
}
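// A small illustration: for nbvar = 3 and ϕ = {{0, 1}, {2}} the minimal true
// points are (true, true, false) for the first clause and
// (false, false, true) for the second.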
// TODO test this with 0, I don't know what happens to the wait group
// otherwise, or just never call it with a DNF with zero clauses
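// ComputeMFPs computes the maximal false points from the minimal true points
// of a regular DNF. The mtps are (optionally) sorted, for each consecutive
// pair the position right after the first index where the previous point is
// false and the current one is true is stored in nu, and for every mtp each
// true position j >= nu[i] yields a candidate point by setting position j to
// false and all later positions to true.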
func ComputeMFPs(mtps []br.BooleanVector, sortPoints bool) []br.BooleanVector {
// first sort the mtps
if sortPoints {
cmp := func(i, j int) bool {
p1, p2 := mtps[i], mtps[j]
if debug {
if len(p1) != len(p2) {
panic("MTPS must be of same length in ComputeMFPs")
}
}
size := len(p1)
for k := 0; k < size; k++ {
val1, val2 := p1[k], p2[k]
if (!val1) && val2 {
return true
} else if val1 && (!val2) {
return false
}
}
if debug {
panic("Must not reach this state in ComputeMFPs")
}
return false
}
sort.Slice(mtps, cmp)
}
// compute nu, we do this concurrently
var wg sync.WaitGroup
wg.Add(len(mtps) - 1)
nu := make([]int, len(mtps))
for i := 1; i < len(mtps); i++ {
go func(index int) {
vars := len(mtps[index])
for j := 0; j < vars; j++ {
val1 := mtps[index-1][j]
val2 := mtps[index][j]
if (!val1) && val2 {
nu[index] = j + 1
break
}
}
wg.Done()
}(i)
}
wg.Wait()
// create the actual points, again we do that concurrently and communicate
// via a channel
// we range over that channel so we must not forget to close it!
res := make([]br.BooleanVector, 0, 10)
// start a function that listens on the channel and adds all points to the
// result
// we use a done channel to signal when all points have been added
resChan := make(chan br.BooleanVector, 10)
done := make(chan bool)
go func() {
for point := range resChan {
res = append(res, point)
}
done <- true
}()
// in the wait group we wait until for all i we've added all points
// after all points were written to the channel we close the channel and then
// wait until they have been added to result
wg.Add(len(mtps))
for i := 0; i < len(mtps); i++ {
go func(index int) {
point := mtps[index]
vars := len(point)
for j := nu[index]; j < vars; j++ {
if point[j] {
if debug {
if nu[index] > j {
panic("nu[i] must be <= j in ComputeMFPs")
}
}
newPoint := point.Clone()
newPoint[j] = false
for k := j + 1; k < vars; k++ {
newPoint[k] = true
}
resChan <- newPoint
}
}
wg.Done()
}(i)
}
wg.Wait()
close(resChan)
// now wait until all points were added to res
<-done
return res
}
// FormulateLP will formulate the linear program to solve.
// It will set the following constraints:
// 1. All variables must be of type int (note that this is really bad for
// the runtime of lpsolve)
// 2. For each minimal true point (a1, ..., ak), where the ai are the variables
// that are true, a constraint that says that the sum of
// these variables must be ≥ the degree;
// that is, we transform the problem a bit and get:
// a1 + ... + ak ≥ d ⇔ a1 + ... + ak - d ≥ 0
// 3. For each maximal false point (a1, ..., ak), where the ai are the variables
// that are true, a constraint that says that the sum of these variables
// must be < the degree:
// a1 + ... + ak < d
// because lpsolve only allows ≤ we transform this to
// a1 + ... + ak ≤ d - 1 ⇔ a1 + ... + ak - d ≤ -1
//
// The additional constraints depend on the mode:
// If mode is TightenNeighbours we compare all variables w(i) and w(i+1).
// We know that it must always hold that w(i) ≥ w(i+1), but it could also
// be w(i) = w(i+1), we find that out by comparing the Winder matrix entries.
// So we have w(i) ≥ w(i+1) ⇔ w(i) - w(i+1) >= 0 or w(i) - w(i+1) = 0.
// TODO we can make this easily concurrent
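// A concrete instance of the two constraint kinds (illustrative only, with
// nbvar = 3, weight variables w0..w2 and degree d):
//
// mtp (true, false, true): w0 + w2 - d ≥ 0
// mfp (false, true, true): w1 + w2 - d ≤ -1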
func FormulateLP(mtps, mfps []br.BooleanVector, nbvar int, winder br.WinderMatrix, tighten TightenMode) (*golp.LP, error) {
// go uses zero based ids, so all variables have ids between 0 and nbvar -1
// the degree has id nbvar
degreeID := nbvar
lp := golp.NewLP(0, nbvar+1)
// set int constraint on all variables
for column := 0; column < nbvar+1; column++ {
lp.SetInt(column, true)
}
for _, mtp := range mtps {
// now add the constraint
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mtp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
// add the row
if err := lp.AddConstraintSparse(row, golp.GE, 0); err != nil {
return nil, err
}
}
for _, mfp := range mfps {
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mfp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
if err := lp.AddConstraintSparse(row, golp.LE, -1); err != nil {
return nil, err
}
}
// now we add additional constraints, depending on the mode
switch {
case tighten == TightenNeighbours:
// add a constraint for neighbouring variables
// we already know that w(i) ≥ w(i+1), but we could already conclude
// that they must be equal
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
for i := 1; i < nbvar; i++ {
// compare both rows
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
var constraint golp.ConstraintType = golp.GE
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
if compRes == 0 {
constraint = golp.EQ
}
// now update the row and add the constraint
entry1.Col = i - 1
entry2.Col = i
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, constraint, 0); err != nil {
return nil, err
}
}
case tighten == TightenAll && nbvar > 0:
// first we will compare each entry i with i+1 and save the comparison
// result between i and i + 1
// we make use of the transitivity of the comparison and later we don't
// have to compare matrix rows again.
// To add all pairs between i and j where i < j we simply have to lookup
// the precomputed results: as long as the comparison result is = the
// variables must be equal, after that only ≥
// TODO would be nice if someone checked this... really confusing
// with all this index stuff ;)
precomputed := make([]int, nbvar-1)
for i := 1; i < nbvar; i++ {
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
precomputed[i-1] = compRes
}
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
// now add all variable pair results
for i := 0; i < nbvar; i++ {
entry1.Col = i
// first find the longest sequence s.t. the variables are equal
j := i + 1
// loop as long as j is equivalent to its predecessor
// as long as this is the case i is equal to j
for ; j < nbvar && precomputed[j-1] == 0; j++ {
// add eq constraint
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.EQ, 0); err != nil {
return nil, err
}
}
// for all remaining j simply add ≥ constraint
for ; j < nbvar; j++ {
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.GE, 0); err != nil {
return nil, err
}
}
}
}
// TODO some objective must be set
// I only came up with this, however this will also keep the Coefficients
// small...
obj := make([]float64, nbvar+1)
// for i := 0; i < nbvar+1; i++ {
// obj[i] = 1.0
// }
obj[0] = 0.0
lp.SetObjFn(obj)
return lp, nil
}
// TODO only call if there is at least one variable
func SolveLP(lp *golp.LP) (*LPB, error) {
lp.SetVerboseLevel(golp.CRITICAL)
convRes := lp.Solve()
// TODO I've added suboptimal, this should be ok as well?
// TODO seems the constants in golp are wrong...
// we should use them but it's broken :(
if convRes != golp.OPTIMAL && convRes != golp.SUBOPTIMAL {
return nil, fmt.Errorf("Can't solve linear program, lpsolve solution type is %v", convRes)
}
vars := lp.Variables()
coeffs := make([]LPBCoeff, len(vars)-1)
for i, asFloat := range vars[:len(vars)-1] {
// just to be sure
asFloat = math.Floor(asFloat)
coeff := LPBCoeff(asFloat)
coeffs[i] = coeff
}
threshold := LPBCoeff(vars[len(vars)-1])
return NewLPB(threshold, coeffs), nil
}
// LPSolver implements the DNFToLPB interface by using the linear programming
// algorithm.
//
// There are some options you can change, see NewLPSolver, NewLinearProgram and
// NewLinearProgram.Solve for more details.
//
// It will also rename the variables in the LPB again, that is if the variables
// were renamed for our algorithm to work it will rename the resulting LPB
// correctly.
type LPSolver struct {
SortMatrix, SortClauses, RegTest bool
Tighten TightenMode
}
// NewLPSolver returns a new LPSolver with SortMatrix, SortClauses and RegTest
// set to true.
//
// For details of these variables see NewLinearProgram and LinearProgram.Solve.
//
// tighten describes how many additional constraints should be added to the
// lp, see TightenMode documentation for more details.
func NewLPSolver(tighten TightenMode) *LPSolver {
return &LPSolver{SortMatrix: true,
SortClauses: true,
RegTest: true,
Tighten: tighten,
}
}
// Convert does everything required to compute the LPB: It sets up the program
// and tries to solve it.
// It will also undo the renaming if required.
func (s *LPSolver) Convert(phi br.ClauseSet, nbvar int) (*LPB, error) {
lp := NewLinearProgram(phi, nbvar, s.SortMatrix, s.SortClauses)
res, err := lp.Solve(s.Tighten, s.RegTest)
if err != nil {
return nil, err
}
// undo renaming
return res.Rename(lp.ReverseRenaming), nil
}
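// A minimal usage sketch for the whole pipeline (illustrative only; building
// the br.ClauseSet is assumed to happen elsewhere and lpsolve/golp must be
// available at runtime):
//
// phi := br.ClauseSet{{0, 1}, {0, 2}} // a minimal DNF over nbvar = 3 variables
// solver := NewLPSolver(TightenNeighbours)
// lpb, err := solver.Convert(phi, 3)
// if err != nil {
// // the DNF is not regular or lpsolve failed
// }
// _ = lpb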
lp.go | // Copyright 2017 Fabian Wenzelmann <[email protected]>, Christian Schilling,
// Jan-Georg Smaus
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lpb
import (
"errors"
"fmt"
"math"
"sort"
"sync"
br "github.com/FabianWe/boolrecognition"
"github.com/draffensperger/golp"
)
// debug is used to panic in some conditions, if tested properly set to false.
const debug = true
// DNFTreeNodeContent is a node in the tree we construct for the regularity
// test. Each node stores a DNF, the information if that DNF is final (i.e.
// true or false), its depth and two children.
//
// The children are stored by ID, in the tree we store a list of all nodes
// and can therefore retrieve the actual node.
type DNFTreeNodeContent struct {
phi br.ClauseSet
leftChild, rightChild int
final bool
depth int
}
// A DNFTree is a collection of DNFTreeNodeContent objects.
// The root note is stored on position 0.
type DNFTree struct {
Content []*DNFTreeNodeContent
Nbvar int
}
// NewDNFTree returns an empty tree containing no nodes.
func NewDNFTree(nbvar int) *DNFTree {
return &DNFTree{Content: nil, Nbvar: nbvar}
}
// CreateNodeEntry creates a new node given its DNF, depth and the information
// if that DNF is final.
//
// It will append the new node to the tree and return the index of the new node.
func (tree *DNFTree) CreateNodeEntry(phi br.ClauseSet, depth int, isFinal bool) int {
n := &DNFTreeNodeContent{phi, -1, -1, isFinal, depth}
tree.Content = append(tree.Content, n)
return len(tree.Content) - 1
}
// CreateRoot creates a root node and returns its ID (should be 0).
func (tree *DNFTree) CreateRoot(phi br.ClauseSet, isFinal bool) int {
return tree.CreateNodeEntry(phi, 0, isFinal)
}
// CreateLeftChild creates a new node and sets the left child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateLeftChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateLeftChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateLeftChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.leftChild = id
return id
}
// CreateLeftChild creates a new node and sets the right child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateRightChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateRightChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateRightChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.rightChild = id
return id
}
// IsLeaf checks if the node is a leaf (has no child nodes).
func (tree *DNFTree) IsLeaf(nodeID int) bool {
n := tree.Content[nodeID]
return n.leftChild < 0 && n.rightChild < 0
}
type LPSplitResult struct {
Final bool
Phi br.ClauseSet
}
func (tree *DNFTree) Split(nodeID int) (*LPSplitResult, *LPSplitResult) {
n := tree.Content[nodeID]
variable := n.depth
first, second := br.NewClauseSet(len(n.phi)), br.NewClauseSet(len(n.phi))
for _, clause := range n.phi {
// check if the variable is contained
if len(clause) > 0 && clause[0] == variable {
// remove variable and add to the first result
first = append(first, clause[1:])
} else {
// variable not contained, so just add the complete clause
second = append(second, clause)
}
}
isFirstFinal, isSecondFinal := isFinal(first) != NotFinal, isFinal(second) != NotFinal
return &LPSplitResult{isFirstFinal, first}, &LPSplitResult{isSecondFinal, second}
}
// BuildTree will build the whole tree. The root note must be set already.
func (tree *DNFTree) BuildTree() {
if debug {
if len(tree.Content) != 1 {
panic("Expected a tree containing exactly one node (the root) in BuildTree")
}
}
if tree.Content[0].final {
// for true and false there is nothing to do
return
}
// create a queue that stores the node ids that must be explored
// add first node (root) to it
waiting := []int{0}
for len(waiting) != 0 {
nextID := waiting[0]
waiting = waiting[1:]
next := tree.Content[nextID]
if next.final {
// no splitting required for final node
continue
}
// split the node
first, second := tree.Split(nextID)
if first.Final {
if len(first.Phi) != 0 {
leftID := tree.CreateLeftChild(nextID, first.Phi, true)
waiting = append(waiting, leftID)
}
// TODO why only in this case?
} else {
leftID := tree.CreateLeftChild(nextID, first.Phi, false)
waiting = append(waiting, leftID)
}
if second.Final {
if len(second.Phi) != 0 {
rightID := tree.CreateRightChild(nextID, second.Phi, true)
waiting = append(waiting, rightID)
}
} else {
rightID := tree.CreateRightChild(nextID, second.Phi, true)
waiting = append(waiting, rightID)
}
}
}
func (tree *DNFTree) IsImplicant(mtp br.BooleanVector) bool {
uID := 0
for k := 0; k < len(mtp); k++ {
u := tree.Content[uID]
if tree.IsLeaf(uID) {
return true
}
leftChild, rightChild := u.leftChild, u.rightChild
if mtp[k] {
if leftChild >= 0 {
uID = leftChild
continue
} else {
if debug {
if rightChild < 0 {
panic("rightChild must not be nil in IsImplicant")
}
}
uID = rightChild
}
} else {
if rightChild >= 0 {
uID = rightChild
continue
} else {
return false
}
}
}
if debug {
if !(tree.Content[uID].leftChild < 0 && tree.Content[uID].rightChild < 0) {
panic("rightChild and leftChild must be nil in IsImplicant")
}
}
return true
}
func (tree *DNFTree) IsRegular(mtps []br.BooleanVector) bool |
// TightenMode describes different modes to tighten the linear program
// before solving it.
//
// There are three different modes described below.
type TightenMode int
const (
TightenNone TightenMode = iota // Add only constraings necessary for solving the problem
TightenNeighbours // Add also constraings between variables x(i) and x(i + 1)
TightenAll // Add additional constraints between all variable pairs
)
type LinearProgram struct {
Renaming, ReverseRenaming []int
Tree *DNFTree
Winder br.WinderMatrix
LP *golp.LP
MFPs, MTPs []br.BooleanVector
Phi br.ClauseSet
Nbvar int
}
// NewLinearProgram creates a new lp given the DNF ϕ.
//
// It will however not create the actual program or the tree, this must be done
// somewhere else, it only creates the root node.
//
// Important note: For our algorithm to work the variables must be sorted
// according to their importance. Since this is not always the case (only
// during testing and some very special cases) this method will do this for
// you, i.e. it will create the winder matrix and then rename all
// variables accordingly. So the DNF we store in the root node is the
// renamed DNF. But we also store the mapping that caused this renaming
// in the field Renaming. This slice stores for each "old" variable
// the id in the new tree, i.e. a lookup tree.Renaming[id] gives you the
// id of the variable in the new tree.
// The reverse mapping, i.e. new variable → old variable is stored in
// ReverseRenaming.
//
// If you don't need the renaming set sortMatrix to false, in this case
// the matrix will work properly but the variables don't get sorted.
// That is only set it to false if you know that the ordering of the variables
// is already correct.
// Renaming and ReverseRenaming will be set to nil in this case.
//
// Also the clauses in the DNF must be sorted in increasing order.
// If you don't want the clauses to get sorted set sortClauses to false.
// Of course this only makes sense if also sortMatrix is set to false,
// otherwise the new dnf might not be sorted.
// This functions will sort them in this case nonetheless.
//
// The variables in the DNF have to be 0 <= v < nbar (so nbvar must be correct
// and variables start with 0).
// Also each variable should appear at least once in the DNF, what happens
// otherwise is not tested yet.
func NewLinearProgram(phi br.ClauseSet, nbvar int, sortMatrix, sortClauses bool) *LinearProgram {
tree := NewDNFTree(nbvar)
newDNF, winder, renaming, reverseRenaming := InitLP(phi, nbvar, sortMatrix)
if sortMatrix || sortClauses {
newDNF.SortAll()
}
dnfType := isFinal(newDNF)
rootID := tree.CreateRoot(newDNF, dnfType != NotFinal)
if debug {
if rootID != 0 {
panic("Expected root id to be 0, in NewLinearProgram")
}
}
return &LinearProgram{Renaming: renaming,
ReverseRenaming: reverseRenaming,
Tree: tree,
Winder: winder,
LP: nil,
MFPs: nil,
MTPs: nil,
Phi: newDNF,
Nbvar: nbvar,
}
}
// InitLP initializes the lp, that is it creates the Winder matrix for
// (the renamed) ϕ.
// It will also compute Renaming and ReverseRenaming as discussed in
// NewLinearProgram.
//
// It returns first the renamedDNF, the Winder matrix, then Renaming and then
// ReverseRenaming.
// If sortMatrix is false the old dnf will be returned.
func InitLP(phi br.ClauseSet, nbvar int, sortMatrix bool) (br.ClauseSet, br.WinderMatrix, []int, []int) {
newDNF := phi
var renaming, reverseRenaming []int = nil, nil
winder := br.NewWinderMatrix(phi, nbvar, true)
if sortMatrix {
renaming = make([]int, nbvar)
reverseRenaming = make([]int, nbvar)
// sort the matrix
winder.Sort()
// create the renaming
for newVariableId, row := range winder {
renaming[row[len(row)-1]] = newVariableId
reverseRenaming[newVariableId] = row[len(row)-1]
}
newDNF = make([]br.Clause, len(phi))
// clone each clause
// we'll do that concurrently
var wg sync.WaitGroup
wg.Add(len(phi))
for i := 0; i < len(phi); i++ {
go func(index int) {
clause := phi[index]
var newClause br.Clause = make([]int, len(clause))
for j, oldID := range clause {
newClause[j] = renaming[oldID]
}
newDNF[index] = newClause
wg.Done()
}(i)
}
wg.Wait()
}
return newDNF, winder, renaming, reverseRenaming
}
func (lp LinearProgram) Solve(tighten TightenMode, regTest bool) (*LPB, error) {
// create minimal true points
mtps := ComputeMTPs(lp.Phi, lp.Nbvar)
lp.MTPs = mtps
// if regularity test should be beformed create the DNF tree
if regTest {
lp.Tree.BuildTree()
if !lp.Tree.IsRegular(mtps) {
return nil, errors.New("DNF is not regular")
}
}
// compute maximal false points
mfps := ComputeMFPs(mtps, true)
lp.MFPs = mfps
// setup the linear program
program, setupErr := FormulateLP(mtps, mfps, lp.Nbvar, lp.Winder, tighten)
if setupErr != nil {
return nil, setupErr
}
lp.LP = program
// try to convert it
return SolveLP(program)
}
// ComputeMTPs computes the set of minimal true points of a minimal ϕ.
// Since ϕ is minimal this is easy: Each clause defines exactly one minimal
// true point.
func ComputeMTPs(phi br.ClauseSet, nbvar int) []br.BooleanVector {
res := make([]br.BooleanVector, len(phi))
for i, clause := range phi {
point := br.NewBooleanVector(nbvar)
res[i] = point
for _, v := range clause {
point[v] = true
}
}
return res
}
// TODO test this with 0, I don't know what happens to the wait group
// otherwise, or just never call it with a DNF with zero clauses
func ComputeMFPs(mtps []br.BooleanVector, sortPoints bool) []br.BooleanVector {
// first sort the mtps
if sortPoints {
cmp := func(i, j int) bool {
p1, p2 := mtps[i], mtps[j]
if debug {
if len(p1) != len(p2) {
panic("MTPS must be of same length in ComputeMFPs")
}
}
size := len(p1)
for k := 0; k < size; k++ {
val1, val2 := p1[k], p2[k]
if (!val1) && val2 {
return true
} else if val1 && (!val2) {
return false
}
}
if debug {
panic("Must not reach this state in ComputeMFPs")
}
return false
}
sort.Slice(mtps, cmp)
}
// compute nu, we do this concurrently
var wg sync.WaitGroup
wg.Add(len(mtps) - 1)
nu := make([]int, len(mtps))
for i := 1; i < len(mtps); i++ {
go func(index int) {
vars := len(mtps[index])
for j := 0; j < vars; j++ {
val1 := mtps[index-1][j]
val2 := mtps[index][j]
if (!val1) && val2 {
nu[index] = j + 1
break
}
}
wg.Done()
}(i)
}
wg.Wait()
// create the actual points, again we do that concurrently and communicate
// via a channel
// we range over that channel so we must not forget to close it!
res := make([]br.BooleanVector, 0, 10)
// start a function that listens on the channel and adds all points to the
// result
// we use a done channel to signal when all points have been added
resChan := make(chan br.BooleanVector, 10)
done := make(chan bool)
go func() {
for point := range resChan {
res = append(res, point)
}
done <- true
}()
// in the wait group we wait until for all i we've added all points
// after all points were written to the channel we close the channel and then
// wait until they have been added to result
wg.Add(len(mtps))
for i := 0; i < len(mtps); i++ {
go func(index int) {
point := mtps[index]
vars := len(point)
for j := nu[index]; j < vars; j++ {
if point[j] {
if debug {
if nu[index] > j {
panic("nu[i] must be <= j in ComputeMFPs")
}
}
newPoint := point.Clone()
newPoint[j] = false
for k := j + 1; k < vars; k++ {
newPoint[k] = true
}
resChan <- newPoint
}
}
wg.Done()
}(i)
}
wg.Wait()
close(resChan)
// now wait until all points were added to res
<-done
return res
}
// FormulateLP will formulate the linear program to solve.
// It will set the following constraings:
// 1. All variables must be of type int (note that this is really bad for
// the runtime of lpsolve)
// 2. For each minimal true point (a1, ..., ak) where ai are the variables
// that are true a constraint that says that the sum of
// all variables must be ≥ the degree
// that is we transform the problem a bit and get:
// a1 + ... + ak ≥ d ⇔ a1 + ... + ak -d ≥ 0
// 3. For each maximal false point (a1, ..., ak) where are ai are the variables
// that are true a constraint that says that the sum of all variables
// must be < the degree:
// a1 + ... + ak < d
// because lpsolve only allows ≤ we transform this to
// a1 + ... + ak ≤ d - 1 ⇔ a1 + ... + ak - ≤ -1
//
// The additional constraints depend on the mode:
// If mode is TightenNeighbours we compare all variables w(i) and w(i+1).
// We know that it must always hold that w(i) ≥ w(i+1), but it could also
// be w(i) = w(i+1), we find that out by comparing the Winder matrix entries.
// So we have w(i) ≥ w(i+1) ⇔ w(i) - w(i+1) >= 0 or w(i) - w(i+1) = 0.
// TODO we can make this easily concurrent
func FormulateLP(mtps, mfps []br.BooleanVector, nbvar int, winder br.WinderMatrix, tighten TightenMode) (*golp.LP, error) {
// go uses zero based ids, so all variables have ids between 0 and nbvar -1
// the degree has id nbvar
degreeID := nbvar
lp := golp.NewLP(0, nbvar+1)
// set int constraing on all variables
for column := 0; column < nbvar+1; column++ {
lp.SetInt(column, true)
}
for _, mtp := range mtps {
// now add the constraint
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mtp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
// add the row
if err := lp.AddConstraintSparse(row, golp.GE, 0); err != nil {
return nil, err
}
}
for _, mfp := range mfps {
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mfp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
if err := lp.AddConstraintSparse(row, golp.LE, -1); err != nil {
return nil, err
}
}
// now we add additional constraints, depending on the mode
switch {
case tighten == TightenNeighbours:
// add a constraint for neighbouring variables
// we already know that w(i) ≥ w(i+1), but we could already conclude
// that they must be equal
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
for i := 1; i < nbvar; i++ {
// compare both rows
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
var constraint golp.ConstraintType = golp.GE
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
if compRes == 0 {
constraint = golp.EQ
}
// now update the row and add the constraint
entry1.Col = i - 1
entry2.Col = i
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, constraint, 0); err != nil {
return nil, err
}
}
case tighten == TightenAll && nbvar > 0:
// first we will compare each entry i with i+1 and save the comparison
// result between i and i + 1
// we make use of the transitivity of the comparison and later we don't
// have to compare matrix rows again.
// To add all pairs between i and j where i < j we simply have to lookup
// the precomputed results: as long as the comparison result is = the
// variables must be equal, after that only ≥
// TODO would be nice if someone checked this... really confusing
// with all this index stuff ;)
precomputed := make([]int, nbvar-1)
for i := 1; i < nbvar; i++ {
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
precomputed[i-1] = compRes
}
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
// now add all variable pair results
for i := 0; i < nbvar; i++ {
entry1.Col = i
// first find the longest sequence s.t. the variables are equal
j := i + 1
// loop as long as j is equivalent to its predecessor
// as long as this is the case i is equal to j
for ; j < nbvar && precomputed[j-1] == 0; j++ {
// add eq constraing
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.EQ, 0); err != nil {
return nil, err
}
}
// for all remaining j simply add ≥ constraint
for ; j < nbvar; j++ {
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.GE, 0); err != nil {
return nil, err
}
}
}
}
// TODO some objective must be set
// I only came up with this, however this will also keep the Coefficients
// small...
obj := make([]float64, nbvar+1)
// for i := 0; i < nbvar+1; i++ {
// obj[i] = 1.0
// }
obj[0] = 0.0
lp.SetObjFn(obj)
return lp, nil
}
// TODO only call if there is at least one variable
func SolveLP(lp *golp.LP) (*LPB, error) {
lp.SetVerboseLevel(golp.CRITICAL)
convRes := lp.Solve()
// TODO I've added suboptiomal, this should be ok as well?
// TODO seems the constants in golp are wrong...
// we should use them but it's broken :(
if convRes != golp.OPTIMAL && convRes != golp.SUBOPTIMAL {
return nil, fmt.Errorf("Can't solve linear program, lpsolve solution type is %v", convRes)
}
vars := lp.Variables()
coeffs := make([]LPBCoeff, len(vars)-1)
for i, asFloat := range vars[:len(vars)-1] {
// just to be sure
asFloat = math.Floor(asFloat)
coeff := LPBCoeff(asFloat)
coeffs[i] = coeff
}
threshold := LPBCoeff(vars[len(vars)-1])
return NewLPB(threshold, coeffs), nil
}
// LPSolver implements the DNFToLPB interface by using the linear programming
// algorithm.
//
// Ther are some options you can change, see NewLPSolver, NewLinearProgram and
// NewLinearProgram.Solve for more details.
//
// It will also rename the variables in the LPB again, that is if the variables
// were renamed for our algorithm to work it will rename the resulting LPB
// correctly.
type LPSolver struct {
SortMatrix, SortClauses, RegTest bool
Tighten TightenMode
}
// NewLPSolver returns a new LPSolver with SortMatrix, SortClauses and RegTest
// set to true.
//
// For details of these variables see NewLinearProgram and LinearProgram.Solve
// for more details.
//
// tighten describes how many additional constraints should be added to the
// lp, see TightenMode documentation for more details.
func NewLPSolver(tighten TightenMode) *LPSolver {
return &LPSolver{SortMatrix: true,
SortClauses: true,
RegTest: true,
Tighten: tighten,
}
}
// Convert does everything required to compute the LPB: It sets up the program
// and tries to solve it.
// It will also undo the renaming if required.
func (s *LPSolver) Convert(phi br.ClauseSet, nbvar int) (*LPB, error) {
lp := NewLinearProgram(phi, nbvar, s.SortMatrix, s.SortClauses)
res, err := lp.Solve(s.Tighten, s.RegTest)
if err != nil {
return nil, err
}
// undo renaming
return res.Rename(lp.ReverseRenaming), nil
}
| {
numRuns := tree.Nbvar - 1
res := true
// we will do this concurrently:
// for each mtp iterate over all variable combinations and perform the test
// and write the result to a channel
// this also has some drawback: we need to wait for all mtps to finish
// otherwise we would need some context wish would be too much here
// so they all must write a result, even if one already returns false...
report := make(chan bool, 10)
// channel to report once we read all results
done := make(chan bool)
go func() {
for i := 0; i < len(mtps); i++ {
nxt := <-report
if !nxt {
res = false
}
}
done <- true
}()
for k := 0; k < len(mtps); k++ {
go func(index int) {
mtp := mtps[index]
check := true
for i := 0; i < numRuns; i++ {
if (!mtp[i]) && (mtp[i+1]) {
// change the positions in the point, after the implicant test
// we will change them again
mtp[i] = true
mtp[i+1] = false
isImplicant := tree.IsImplicant(mtp)
mtp[i] = false
mtp[i+1] = true
if !isImplicant {
check = false
break
}
}
}
report <- check
}(k)
}
// wait until all results are there
<-done
return res
} | identifier_body |
lp.go | // Copyright 2017 Fabian Wenzelmann <[email protected]>, Christian Schilling,
// Jan-Georg Smaus
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lpb
import (
"errors"
"fmt"
"math"
"sort"
"sync"
br "github.com/FabianWe/boolrecognition"
"github.com/draffensperger/golp"
)
// debug is used to panic in some conditions, if tested properly set to false.
const debug = true
// DNFTreeNodeContent is a node in the tree we construct for the regularity
// test. Each node stores a DNF, the information if that DNF is final (i.e.
// true or false), its depth and two children.
//
// The children are stored by ID, in the tree we store a list of all nodes
// and can therefore retrieve the actual node.
type DNFTreeNodeContent struct {
phi br.ClauseSet
leftChild, rightChild int
final bool
depth int
}
// A DNFTree is a collection of DNFTreeNodeContent objects.
// The root note is stored on position 0.
type DNFTree struct {
Content []*DNFTreeNodeContent
Nbvar int
}
// NewDNFTree returns an empty tree containing no nodes.
func NewDNFTree(nbvar int) *DNFTree {
return &DNFTree{Content: nil, Nbvar: nbvar}
}
// CreateNodeEntry creates a new node given its DNF, depth and the information
// if that DNF is final.
//
// It will append the new node to the tree and return the index of the new node.
func (tree *DNFTree) CreateNodeEntry(phi br.ClauseSet, depth int, isFinal bool) int {
n := &DNFTreeNodeContent{phi, -1, -1, isFinal, depth}
tree.Content = append(tree.Content, n)
return len(tree.Content) - 1
}
// CreateRoot creates a root node and returns its ID (should be 0).
func (tree *DNFTree) CreateRoot(phi br.ClauseSet, isFinal bool) int {
return tree.CreateNodeEntry(phi, 0, isFinal)
}
// CreateLeftChild creates a new node and sets the left child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateLeftChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateLeftChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateLeftChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.leftChild = id
return id
}
// CreateLeftChild creates a new node and sets the right child of nodeID
// to this node. Returns the ID of the new node.
func (tree *DNFTree) CreateRightChild(nodeID int, phi br.ClauseSet, isFinal bool) int {
if debug {
if nodeID < 0 {
panic("Expected nodeID >= 0 in CreateRightChild")
}
if nodeID >= len(tree.Content) {
panic("Expected nodeID < len(content) in CreateRightChild")
}
}
n := tree.Content[nodeID]
id := tree.CreateNodeEntry(phi, n.depth+1, isFinal)
n.rightChild = id
return id
}
// IsLeaf checks if the node is a leaf (has no child nodes).
func (tree *DNFTree) IsLeaf(nodeID int) bool {
n := tree.Content[nodeID]
return n.leftChild < 0 && n.rightChild < 0
}
type LPSplitResult struct {
Final bool
Phi br.ClauseSet
}
func (tree *DNFTree) Split(nodeID int) (*LPSplitResult, *LPSplitResult) {
n := tree.Content[nodeID]
variable := n.depth
first, second := br.NewClauseSet(len(n.phi)), br.NewClauseSet(len(n.phi))
for _, clause := range n.phi {
// check if the variable is contained
if len(clause) > 0 && clause[0] == variable {
// remove variable and add to the first result
first = append(first, clause[1:])
} else {
// variable not contained, so just add the complete clause
second = append(second, clause)
}
}
isFirstFinal, isSecondFinal := isFinal(first) != NotFinal, isFinal(second) != NotFinal
return &LPSplitResult{isFirstFinal, first}, &LPSplitResult{isSecondFinal, second}
}
// BuildTree will build the whole tree. The root note must be set already.
func (tree *DNFTree) BuildTree() {
if debug {
if len(tree.Content) != 1 {
panic("Expected a tree containing exactly one node (the root) in BuildTree")
}
}
if tree.Content[0].final {
// for true and false there is nothing to do
return
}
// create a queue that stores the node ids that must be explored
// add first node (root) to it
waiting := []int{0}
for len(waiting) != 0 {
nextID := waiting[0]
waiting = waiting[1:]
next := tree.Content[nextID]
if next.final {
// no splitting required for final node
continue
}
// split the node
first, second := tree.Split(nextID)
if first.Final {
if len(first.Phi) != 0 {
leftID := tree.CreateLeftChild(nextID, first.Phi, true)
waiting = append(waiting, leftID)
}
// TODO why only in this case?
} else {
leftID := tree.CreateLeftChild(nextID, first.Phi, false)
waiting = append(waiting, leftID)
}
if second.Final {
if len(second.Phi) != 0 {
rightID := tree.CreateRightChild(nextID, second.Phi, true)
waiting = append(waiting, rightID)
}
} else {
rightID := tree.CreateRightChild(nextID, second.Phi, true)
waiting = append(waiting, rightID)
}
}
}
func (tree *DNFTree) IsImplicant(mtp br.BooleanVector) bool {
uID := 0
for k := 0; k < len(mtp); k++ {
u := tree.Content[uID]
if tree.IsLeaf(uID) {
return true
}
leftChild, rightChild := u.leftChild, u.rightChild
if mtp[k] {
if leftChild >= 0 {
uID = leftChild
continue
} else {
if debug {
if rightChild < 0 {
panic("rightChild must not be nil in IsImplicant")
}
}
uID = rightChild
}
} else {
if rightChild >= 0 {
uID = rightChild
continue
} else {
return false
}
}
}
if debug {
if !(tree.Content[uID].leftChild < 0 && tree.Content[uID].rightChild < 0) {
panic("rightChild and leftChild must be nil in IsImplicant")
}
}
return true
}
func (tree *DNFTree) IsRegular(mtps []br.BooleanVector) bool {
numRuns := tree.Nbvar - 1
res := true
// we will do this concurrently:
// for each mtp iterate over all variable combinations and perform the test
// and write the result to a channel
// this also has some drawback: we need to wait for all mtps to finish
// otherwise we would need some context wish would be too much here
// so they all must write a result, even if one already returns false...
report := make(chan bool, 10)
// channel to report once we read all results
done := make(chan bool)
go func() {
for i := 0; i < len(mtps); i++ {
nxt := <-report
if !nxt {
res = false
}
}
done <- true
}()
for k := 0; k < len(mtps); k++ {
go func(index int) {
mtp := mtps[index]
check := true
for i := 0; i < numRuns; i++ {
if (!mtp[i]) && (mtp[i+1]) {
// change the positions in the point, after the implicant test
// we will change them again
mtp[i] = true
mtp[i+1] = false
isImplicant := tree.IsImplicant(mtp)
mtp[i] = false
mtp[i+1] = true
if !isImplicant {
check = false
break
}
}
}
report <- check
}(k)
}
// wait until all results are there
<-done
return res
}
// TightenMode describes different modes to tighten the linear program
// before solving it.
//
// There are three different modes described below.
type TightenMode int
const (
TightenNone TightenMode = iota // Add only constraings necessary for solving the problem
TightenNeighbours // Add also constraings between variables x(i) and x(i + 1)
TightenAll // Add additional constraints between all variable pairs
)
type LinearProgram struct {
Renaming, ReverseRenaming []int
Tree *DNFTree
Winder br.WinderMatrix
LP *golp.LP
MFPs, MTPs []br.BooleanVector
Phi br.ClauseSet
Nbvar int
}
// NewLinearProgram creates a new lp given the DNF ϕ.
//
// It will however not create the actual program or the tree, this must be done
// somewhere else, it only creates the root node.
//
// Important note: For our algorithm to work the variables must be sorted
// according to their importance. Since this is not always the case (only
// during testing and some very special cases) this method will do this for
// you, i.e. it will create the winder matrix and then rename all
// variables accordingly. So the DNF we store in the root node is the
// renamed DNF. But we also store the mapping that caused this renaming
// in the field Renaming. This slice stores for each "old" variable
// the id in the new tree, i.e. a lookup tree.Renaming[id] gives you the
// id of the variable in the new tree.
// The reverse mapping, i.e. new variable → old variable is stored in
// ReverseRenaming.
//
// If you don't need the renaming set sortMatrix to false, in this case
// the matrix will work properly but the variables don't get sorted.
// That is only set it to false if you know that the ordering of the variables
// is already correct.
// Renaming and ReverseRenaming will be set to nil in this case.
//
// Also the clauses in the DNF must be sorted in increasing order.
// If you don't want the clauses to get sorted set sortClauses to false.
// Of course this only makes sense if also sortMatrix is set to false,
// otherwise the new dnf might not be sorted.
// This functions will sort them in this case nonetheless.
//
// The variables in the DNF have to be 0 <= v < nbar (so nbvar must be correct
// and variables start with 0).
// Also each variable should appear at least once in the DNF, what happens
// otherwise is not tested yet.
func NewLinearProgram(phi br.ClauseSet, nbvar int, sortMatrix, sortClauses bool) *LinearProgram {
tree := NewDNFTree(nbvar)
newDNF, winder, renaming, reverseRenaming := InitLP(phi, nbvar, sortMatrix)
if sortMatrix || sortClauses {
newDNF.SortAll()
}
dnfType := isFinal(newDNF)
rootID := tree.CreateRoot(newDNF, dnfType != NotFinal)
if debug {
if rootID != 0 {
panic("Expected root id to be 0, in NewLinearProgram")
}
}
return &LinearProgram{Renaming: renaming,
ReverseRenaming: reverseRenaming,
Tree: tree,
Winder: winder,
LP: nil,
MFPs: nil,
MTPs: nil,
Phi: newDNF,
Nbvar: nbvar,
}
}
// InitLP initializes the lp, that is it creates the Winder matrix for
// (the renamed) ϕ.
// It will also compute Renaming and ReverseRenaming as discussed in
// NewLinearProgram.
//
// It returns first the renamedDNF, the Winder matrix, then Renaming and then
// ReverseRenaming.
// If sortMatrix is false the old dnf will be returned.
func InitLP(phi br.ClauseSet, nbvar int, sortMatrix bool) (br.ClauseSet, br.WinderMatrix, []int, []int) {
newDNF := phi
var renaming, reverseRenaming []int = nil, nil
winder := br.NewWinderMatrix(phi, nbvar, true)
if sortMatrix {
renaming = make([]int, nbvar)
reverseRenaming = make([]int, nbvar)
// sort the matrix
winder.Sort()
// create the renaming
for newVariableId, row := range winder {
renaming[row[len(row)-1]] = newVariableId
reverseRenaming[newVariableId] = row[len(row)-1]
}
newDNF = make([]br.Clause, len(phi))
// clone each clause
// we'll do that concurrently
var wg sync.WaitGroup
wg.Add(len(phi))
for i := 0; i < len(phi); i++ {
go func(index int) {
clause := phi[index]
var newClause br.Clause = make([]int, len(clause))
for j, oldID := range clause {
newClause[j] = renaming[oldID]
}
newDNF[index] = newClause
wg.Done()
}(i)
}
wg.Wait()
}
return newDNF, winder, renaming, reverseRenaming
}
func (lp LinearProgram) Solve(tighten TightenMode, regTest bool) (*LPB, error) {
// create minimal true points
mtps := ComputeMTPs(lp.Phi, lp.Nbvar)
lp.MTPs = mtps
// if regularity test should be beformed create the DNF tree
if regTest {
lp.Tree.BuildTree()
if !lp.Tree.IsRegular(mtps) {
return nil, errors.New("DNF is not regular")
}
}
// compute maximal false points
mfps := ComputeMFPs(mtps, true)
lp.MFPs = mfps
// setup the linear program
program, setupErr := FormulateLP(mtps, mfps, lp.Nbvar, lp.Winder, tighten)
if setupErr != nil {
return nil, setupErr
}
lp.LP = program
// try to convert it
return SolveLP(program)
}
// ComputeMTPs computes the set of minimal true points of a minimal ϕ.
// Since ϕ is minimal this is easy: Each clause defines exactly one minimal
// true point.
func Comput | r.ClauseSet, nbvar int) []br.BooleanVector {
res := make([]br.BooleanVector, len(phi))
for i, clause := range phi {
point := br.NewBooleanVector(nbvar)
res[i] = point
for _, v := range clause {
point[v] = true
}
}
return res
}
// TODO test this with 0, I don't know what happens to the wait group
// otherwise, or just never call it with a DNF with zero clauses
func ComputeMFPs(mtps []br.BooleanVector, sortPoints bool) []br.BooleanVector {
// first sort the mtps
if sortPoints {
cmp := func(i, j int) bool {
p1, p2 := mtps[i], mtps[j]
if debug {
if len(p1) != len(p2) {
panic("MTPS must be of same length in ComputeMFPs")
}
}
size := len(p1)
for k := 0; k < size; k++ {
val1, val2 := p1[k], p2[k]
if (!val1) && val2 {
return true
} else if val1 && (!val2) {
return false
}
}
if debug {
panic("Must not reach this state in ComputeMFPs")
}
return false
}
sort.Slice(mtps, cmp)
}
// compute nu, we do this concurrently
var wg sync.WaitGroup
wg.Add(len(mtps) - 1)
nu := make([]int, len(mtps))
for i := 1; i < len(mtps); i++ {
go func(index int) {
vars := len(mtps[index])
for j := 0; j < vars; j++ {
val1 := mtps[index-1][j]
val2 := mtps[index][j]
if (!val1) && val2 {
nu[index] = j + 1
break
}
}
wg.Done()
}(i)
}
wg.Wait()
// create the actual points, again we do that concurrently and communicate
// via a channel
// we range over that channel so we must not forget to close it!
res := make([]br.BooleanVector, 0, 10)
// start a function that listens on the channel and adds all points to the
// result
// we use a done channel to signal when all points have been added
resChan := make(chan br.BooleanVector, 10)
done := make(chan bool)
go func() {
for point := range resChan {
res = append(res, point)
}
done <- true
}()
// in the wait group we wait until for all i we've added all points
// after all points were written to the channel we close the channel and then
// wait until they have been added to result
wg.Add(len(mtps))
for i := 0; i < len(mtps); i++ {
go func(index int) {
point := mtps[index]
vars := len(point)
for j := nu[index]; j < vars; j++ {
if point[j] {
if debug {
if nu[index] > j {
panic("nu[i] must be <= j in ComputeMFPs")
}
}
newPoint := point.Clone()
newPoint[j] = false
for k := j + 1; k < vars; k++ {
newPoint[k] = true
}
resChan <- newPoint
}
}
wg.Done()
}(i)
}
wg.Wait()
close(resChan)
// now wait until all points were added to res
<-done
return res
}
// FormulateLP will formulate the linear program to solve.
// It will set the following constraings:
// 1. All variables must be of type int (note that this is really bad for
// the runtime of lpsolve)
// 2. For each minimal true point (a1, ..., ak) where ai are the variables
// that are true a constraint that says that the sum of
// all variables must be ≥ the degree
// that is we transform the problem a bit and get:
// a1 + ... + ak ≥ d ⇔ a1 + ... + ak -d ≥ 0
// 3. For each maximal false point (a1, ..., ak) where are ai are the variables
// that are true a constraint that says that the sum of all variables
// must be < the degree:
// a1 + ... + ak < d
// because lpsolve only allows ≤ we transform this to
// a1 + ... + ak ≤ d - 1 ⇔ a1 + ... + ak - ≤ -1
//
// The additional constraints depend on the mode:
// If mode is TightenNeighbours we compare all variables w(i) and w(i+1).
// We know that it must always hold that w(i) ≥ w(i+1), but it could also
// be w(i) = w(i+1), we find that out by comparing the Winder matrix entries.
// So we have w(i) ≥ w(i+1) ⇔ w(i) - w(i+1) >= 0 or w(i) - w(i+1) = 0.
// TODO we can make this easily concurrent
func FormulateLP(mtps, mfps []br.BooleanVector, nbvar int, winder br.WinderMatrix, tighten TightenMode) (*golp.LP, error) {
// go uses zero based ids, so all variables have ids between 0 and nbvar -1
// the degree has id nbvar
degreeID := nbvar
lp := golp.NewLP(0, nbvar+1)
// set int constraing on all variables
for column := 0; column < nbvar+1; column++ {
lp.SetInt(column, true)
}
for _, mtp := range mtps {
// now add the constraint
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mtp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
// add the row
if err := lp.AddConstraintSparse(row, golp.GE, 0); err != nil {
return nil, err
}
}
for _, mfp := range mfps {
row := make([]golp.Entry, 0, nbvar+1)
for j, val := range mfp {
if val {
row = append(row, golp.Entry{Col: j, Val: 1})
}
}
// add -d
row = append(row, golp.Entry{Col: degreeID, Val: -1})
if err := lp.AddConstraintSparse(row, golp.LE, -1); err != nil {
return nil, err
}
}
// now we add additional constraints, depending on the mode
switch {
case tighten == TightenNeighbours:
// add a constraint for neighbouring variables
// we already know that w(i) ≥ w(i+1), but we could already conclude
// that they must be equal
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
for i := 1; i < nbvar; i++ {
// compare both rows
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
var constraint golp.ConstraintType = golp.GE
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
if compRes == 0 {
constraint = golp.EQ
}
// now update the row and add the constraint
entry1.Col = i - 1
entry2.Col = i
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, constraint, 0); err != nil {
return nil, err
}
}
case tighten == TightenAll && nbvar > 0:
// first we will compare each entry i with i+1 and save the comparison
// result between i and i + 1
// we make use of the transitivity of the comparison and later we don't
// have to compare matrix rows again.
// To add all pairs between i and j where i < j we simply have to lookup
// the precomputed results: as long as the comparison result is = the
// variables must be equal, after that only ≥
// TODO would be nice if someone checked this... really confusing
// with all this index stuff ;)
precomputed := make([]int, nbvar-1)
for i := 1; i < nbvar; i++ {
compRes := br.CompareMatrixEntry(winder[i-1], winder[i])
if debug {
if compRes < 0 {
panic("Unsorted Winder matrix in FormulateLP")
}
}
precomputed[i-1] = compRes
}
entry1 := golp.Entry{Col: -1, Val: 1}
entry2 := golp.Entry{Col: -1, Val: -1}
// now add all variable pair results
for i := 0; i < nbvar; i++ {
entry1.Col = i
// first find the longest sequence s.t. the variables are equal
j := i + 1
// loop as long as j is equivalent to its predecessor
// as long as this is the case i is equal to j
for ; j < nbvar && precomputed[j-1] == 0; j++ {
// add eq constraing
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.EQ, 0); err != nil {
return nil, err
}
}
// for all remaining j simply add ≥ constraint
for ; j < nbvar; j++ {
entry2.Col = j
if err := lp.AddConstraintSparse([]golp.Entry{entry1, entry2}, golp.GE, 0); err != nil {
return nil, err
}
}
}
}
// TODO some objective must be set
// I only came up with this, however this will also keep the Coefficients
// small...
obj := make([]float64, nbvar+1)
// for i := 0; i < nbvar+1; i++ {
// obj[i] = 1.0
// }
obj[0] = 0.0
lp.SetObjFn(obj)
return lp, nil
}
// TODO only call if there is at least one variable
func SolveLP(lp *golp.LP) (*LPB, error) {
lp.SetVerboseLevel(golp.CRITICAL)
convRes := lp.Solve()
// TODO I've added suboptimal, this should be ok as well?
// TODO seems the constants in golp are wrong...
// we should use them but it's broken :(
if convRes != golp.OPTIMAL && convRes != golp.SUBOPTIMAL {
return nil, fmt.Errorf("Can't solve linear program, lpsolve solution type is %v", convRes)
}
vars := lp.Variables()
coeffs := make([]LPBCoeff, len(vars)-1)
for i, asFloat := range vars[:len(vars)-1] {
// just to be sure
asFloat = math.Floor(asFloat)
coeff := LPBCoeff(asFloat)
coeffs[i] = coeff
}
threshold := LPBCoeff(vars[len(vars)-1])
return NewLPB(threshold, coeffs), nil
}
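// formulateAndSolveExample is a usage sketch only: it shows how FormulateLP and
// SolveLP are meant to be chained. The mtps/mfps/winder inputs are assumed to be
// computed elsewhere (minimal true points, maximal false points and the sorted
// Winder matrix); nothing in this helper is part of the public API.
func formulateAndSolveExample(mtps, mfps []br.BooleanVector, nbvar int, winder br.WinderMatrix) (*LPB, error) {
	// build the integer program, adding the neighbour constraints
	lp, err := FormulateLP(mtps, mfps, nbvar, winder, TightenNeighbours)
	if err != nil {
		return nil, err
	}
	// SolveLP floors the solution values and wraps them in an LPB
	return SolveLP(lp)
}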
// LPSolver implements the DNFToLPB interface by using the linear programming
// algorithm.
//
// There are some options you can change, see NewLPSolver, NewLinearProgram and
// LinearProgram.Solve for more details.
//
// It will also rename the variables in the LPB again; that is, if the variables
// were renamed for our algorithm to work, the resulting LPB is renamed back
// correctly.
type LPSolver struct {
SortMatrix, SortClauses, RegTest bool
Tighten TightenMode
}
// NewLPSolver returns a new LPSolver with SortMatrix, SortClauses and RegTest
// set to true.
//
// For details of these variables see NewLinearProgram and LinearProgram.Solve.
//
// tighten describes how many additional constraints should be added to the
// lp, see TightenMode documentation for more details.
func NewLPSolver(tighten TightenMode) *LPSolver {
return &LPSolver{SortMatrix: true,
SortClauses: true,
RegTest: true,
Tighten: tighten,
}
}
// Convert does everything required to compute the LPB: It sets up the program
// and tries to solve it.
// It will also undo the renaming if required.
func (s *LPSolver) Convert(phi br.ClauseSet, nbvar int) (*LPB, error) {
lp := NewLinearProgram(phi, nbvar, s.SortMatrix, s.SortClauses)
res, err := lp.Solve(s.Tighten, s.RegTest)
if err != nil {
return nil, err
}
// undo renaming
return res.Rename(lp.ReverseRenaming), nil
}
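// convertExample is purely illustrative: it wires NewLPSolver and Convert together
// for a caller-provided clause set. phi and nbvar are assumed to describe the DNF
// (and its variable count) exactly as Convert expects them; error handling is left
// to the caller.
func convertExample(phi br.ClauseSet, nbvar int) (*LPB, error) {
	solver := NewLPSolver(TightenNeighbours)
	// Convert builds the LP, solves it and undoes the variable renaming
	return solver.Convert(phi, nbvar)
}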
| eMTPs(phi b | identifier_name |
nexus_label.rs | //! GPT labeling for Nexus devices. The primary partition
//! (/dev/x1) will be used for metadata during rebuild. The second
//! partition contains the file system.
//!
//! The nexus will adjust internal data structures to offset the IO to the
//! right partition. Put differently, when connecting to this device via
//! NVMF or iSCSI it will show up as a device with just one partition.
//!
//! When the nexus is removed from the data path and other initiators are
//! used, the data is still accessible; this removes us as a hard
//! dependency in the data path.
//!
//! # Example:
//!
//! ```bash
//! $ rm /code/disk1.img; truncate -s 1GiB /code/disk1.img
//! $ mctl create gpt -r aio:////code/disk1.img?blk_size=512 -s 1GiB -b
//! $ sgdisk -p /code/disk1.img
//! Found valid GPT with corrupt MBR; using GPT and will write new
//! protective MBR on save.
//! Disk /code//disk1.img: 2097152 sectors, 1024.0 MiB
//! Sector size (logical): 512 bytes
//! Disk identifier (GUID): EAB49A2F-EFEA-45E6-9A1B-61FECE3426DD
//! Partition table holds up to 128 entries
//! Main partition table begins at sector 2 and ends at sector 33
//! First usable sector is 2048, last usable sector is 2097118
//! Partitions will be aligned on 2048-sector boundaries
//! Total free space is 0 sectors (0 bytes)
//!
//! Number Start (sector) End (sector) Size Code Name
//! 1 2048 10239 4.0 MiB FFFF MayaMeta
//! 2 10240 2097118 1019.0 MiB FFFF MayaData
//! ```
//!
//! Notice how two partitions have been created when accessing the disk
//! when shared by the nexus:
//!
//! ```bash
//! $ mctl share gpt
//! "/dev/nbd0"
//!
//! TODO: also note how it complains about a MBR
//!
//! $ lsblk
//! NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
//! sda 8:0 0 50G 0 disk
//! ├─sda1 8:1 0 41.5G 0 part /
//! ├─sda2 8:2 0 7M 0 part [SWAP]
//! └─sda3 8:3 0 511M 0 part /boot
//! sr0 11:0 1 1024M 0 rom
//! nbd0 43:0 0 1019M 0 disk
//! nvme0n1 259:0 0 200G 0 disk /code
//!
//! The nbd0 zero device does not show the partitions
//! ```
use crate::bdev::nexus::Error;
use bincode::{deserialize_from, serialize};
use crc::{crc32, Hasher32};
use serde::{
de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor},
ser::{Serialize, SerializeTuple, Serializer},
};
use std::{
fmt::{self, Display},
io::Cursor,
};
use uuid::{self, parser};
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)]
/// based on RFC4122
pub struct GptGuid {
pub time_low: u32,
pub time_mid: u16,
pub time_high: u16,
pub node: [u8; 8],
}
impl std::str::FromStr for GptGuid {
type Err = parser::ParseError;
fn from_str(uuid: &str) -> Result<Self, Self::Err> {
let fields = uuid::Uuid::from_str(uuid)?;
let fields = fields.as_fields();
Ok(GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
})
}
}
impl std::fmt::Display for GptGuid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
uuid::Uuid::from_fields(
self.time_low,
self.time_mid,
self.time_high,
&self.node,
)
.unwrap()
.to_string()
)
}
}
impl GptGuid {
pub(crate) fn new_random() -> Self {
let fields = uuid::Uuid::new_v4();
let fields = fields.as_fields();
GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
}
}
}
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)]
pub struct GPTHeader {
/// GPT signature (must be "EFI PART").
pub signature: [u8; 8],
/// 00 00 01 00 up til version 2.17
pub revision: [u8; 4],
/// GPT header size (92 bytes)
pub header_size: u32,
/// CRC32 of the header.
pub self_checksum: u32,
pub reserved: [u8; 4],
/// primary lba where the header is located
pub lba_self: u64,
/// alternative lba where the header is located (backup)
pub lba_alt: u64,
/// first usable lba
pub lba_start: u64,
/// last usable lba
pub lba_end: u64,
/// 16 bytes representing the GUID of the GPT.
pub guid: GptGuid,
/// lba of where to find the partition table
pub lba_table: u64,
/// number of partitions, most tools set this to 128
pub num_entries: u32,
/// Size of element
pub entry_size: u32,
/// CRC32 checksum of the partition array.
pub table_crc: u32,
}
impl GPTHeader {
/// converts a slice into a gpt header and verifies the validity of the data
pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> {
let mut reader = Cursor::new(slice);
let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap();
if gpt.header_size != 92
|| gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54]
|| gpt.revision != [0x00, 0x00, 0x01, 0x00]
{
return Err(Error::Invalid);
}
let crc = gpt.self_checksum;
gpt.self_checksum = 0;
gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap());
if gpt.self_checksum != crc {
info!("GPT label crc mismatch");
return Err(Error::Invalid);
}
if gpt.lba_self > gpt.lba_alt {
std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt)
}
Ok(gpt)
}
/// checksum the header with the checksum field itself set to 0
pub fn checksum(&mut self) -> u32 {
self.self_checksum = 0;
self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap());
self.self_checksum
}
pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self {
let fields = guid.as_fields();
GPTHeader {
signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54],
revision: [0x00, 0x00, 0x01, 0x00],
header_size: 92,
self_checksum: 0,
reserved: [0; 4],
lba_self: 1,
lba_alt: num_blocks - 1,
lba_start: u64::from((1 << 20) / blk_size),
lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1,
guid: GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
},
lba_table: 2,
num_entries: 2,
entry_size: 128,
table_crc: 0,
}
}
pub fn to_backup(&self) -> Self {
let mut secondary = *self;
secondary.lba_self = self.lba_alt;
secondary.lba_alt = self.lba_self;
secondary.lba_table = self.lba_end + 1;
secondary
}
}
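/// Usage sketch only (not called anywhere): create a primary/backup GPT header
/// pair for a hypothetical 512-byte-block device and fill in their CRCs. The
/// block count and the random GUID are made-up example values, not something the
/// nexus mandates.
#[allow(dead_code)]
fn example_header_pair() -> (GPTHeader, GPTHeader) {
    let mut primary = GPTHeader::new(512, 2_097_152, uuid::Uuid::new_v4());
    let mut backup = primary.to_backup();
    // both copies carry their own self checksum
    primary.checksum();
    backup.checksum();
    (primary, backup)
}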
#[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)]
pub struct GptEntry {
/// GUID type, some of them are assigned/reserved for example to Linux
pub ent_type: GptGuid,
/// entry GUID, can be anything typically random
pub ent_guid: GptGuid,
/// start lba for this entry
pub ent_start: u64,
/// end lba for this entry
pub ent_end: u64,
/// entry attributes; according to the docs bit 0 MUST be zero
pub ent_attr: u64,
/// utf16 name of the partition entry, do not confuse this with fs labels!
pub ent_name: GptName,
}
impl GptEntry {
/// converts a slice into a partition array
pub fn from_slice(
slice: &[u8],
parts: u32,
) -> Result<Vec<GptEntry>, Error> {
let mut reader = Cursor::new(slice);
let mut part_vec = Vec::new();
// TODO 128 should be passed in as an argument
for _ in 0 .. parts {
part_vec.push(deserialize_from(&mut reader)?);
}
Ok(part_vec)
}
/// calculate the checksum over the partitions table
pub fn checksum(parts: &[GptEntry]) -> u32 {
let mut digest = crc32::Digest::new(crc32::IEEE);
for p in parts {
digest.write(&serialize(p).unwrap());
}
digest.sum32()
}
}
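/// Usage sketch only: compute the partition-array CRC for a set of entries and
/// store it in the header. The entries are assumed to have been filled in
/// elsewhere (e.g. the MayaMeta/MayaData pair the nexus creates).
#[allow(dead_code)]
fn example_table_crc(header: &mut GPTHeader, entries: &[GptEntry]) {
    header.table_crc = GptEntry::checksum(entries);
    // the self checksum must be recomputed because table_crc changed
    header.checksum();
}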
#[derive(Debug, PartialEq, Serialize, Clone)]
/// The nexus label is standard GPT label (such that you can use it without us
/// in the data path) The only thing that is really specific to us is the
/// ent_type GUID if we see that attached to a partition, we assume the data in
/// that partition is ours. In the data we will have more magic markers to
/// confirm the assumption but this is step one.
pub struct NexusLabel {
/// the main GPT header
pub primary: GPTHeader,
/// Vector of GPT entries where the first element is considered to be ours
pub partitions: Vec<GptEntry>,
}
impl NexusLabel {
/// returns the offset to the first data segment
pub(crate) fn offset(&self) -> u64 {
self.partitions[1].ent_start
}
/// returns the number of total blocks in this segment
pub(crate) fn num_blocks(&self) -> u64 {
self.partitions[1].ent_end - self.partitions[1].ent_start
}
}
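/// Usage sketch only: how the nexus would consume a parsed label. `label` is
/// assumed to come from reading and validating the on-disk GPT copies.
#[allow(dead_code)]
fn example_data_range(label: &NexusLabel) -> (u64, u64) {
    // IO is shifted by offset() so consumers only ever see the data partition
    (label.offset(), label.num_blocks())
}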
impl Display for NexusLabel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "GUID: {}", self.primary.guid.to_string())?;
writeln!(f, "\tHeader crc32 {}", self.primary.self_checksum)?;
writeln!(f, "\tPartition table crc32 {}", self.primary.table_crc)?;
for i in 0 .. self.partitions.len() {
writeln!(f, "\tPartition number {}", i)?;
writeln!(f, "\tGUID: {}", self.partitions[i].ent_guid.to_string())?;
writeln!(
f,
"\tType GUID: {}",
self.partitions[i].ent_type.to_string()
)?;
writeln!(
f,
"\tLogical block start: {}, end: {}",
self.partitions[i].ent_start, self.partitions[i].ent_end
)?;
}
Ok(())
}
}
// for arrays bigger than 32 elements, things start to get unimplemented
// in terms of derive and what not. So we create a struct with a string
// and tell serde how to use it during (de)serializing
struct GpEntryNameVisitor;
impl<'de> Deserialize<'de> for GptName {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor)
}
}
impl Serialize for GptName {
fn serialize<S>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// we can't use serialize_tuple_struct here as we want exactly 72 bytes
let mut s = serializer.serialize_tuple(36)?;
let mut out: Vec<u16> = vec![0; 36];
for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) {
*o = i;
}
out.iter().for_each(|e| s.serialize_element(&e).unwrap());
s.end()
}
}
impl<'de> Visitor<'de> for GpEntryNameVisitor {
type Value = GptName;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Invalid GPT partition name")
}
fn visit_seq<A>(self, mut seq: A) -> std::result::Result<GptName, A::Error>
where
A: SeqAccess<'de>,
{
le | (Debug, PartialEq, Default, Clone)]
pub struct GptName {
pub name: String,
}
impl GptName {
pub fn as_str(&self) -> &str {
&self.name
}
}
| t mut out = Vec::new();
let mut end = false;
loop {
match seq.next_element()? {
Some(0) => {
end = true;
}
Some(e) if !end => out.push(e),
_ => break,
}
}
if end {
Ok(GptName {
name: String::from_utf16_lossy(&out),
})
} else {
Err(serde::de::Error::invalid_value(Unexpected::Seq, &self))
}
}
}
#[derive | identifier_body |
nexus_label.rs | //! GPT labeling for Nexus devices. The primary partition
//! (/dev/x1) will be used for metadata during rebuild. The second
//! partition contains the file system.
//!
//! The nexus will adjust internal data structures to offset the IO to the
//! right partition. Put differently, when connecting to this device via
//! NVMF or iSCSI it will show up as a device with just one partition.
//!
//! When the nexus is removed from the data path and other initiators are
//! used, the data is still accessible; this removes us as a hard
//! dependency in the data path.
//!
//! # Example:
//!
//! ```bash
//! $ rm /code/disk1.img; truncate -s 1GiB /code/disk1.img
//! $ mctl create gpt -r aio:////code/disk1.img?blk_size=512 -s 1GiB -b
//! $ sgdisk -p /code/disk1.img
//! Found valid GPT with corrupt MBR; using GPT and will write new
//! protective MBR on save.
//! Disk /code//disk1.img: 2097152 sectors, 1024.0 MiB
//! Sector size (logical): 512 bytes
//! Disk identifier (GUID): EAB49A2F-EFEA-45E6-9A1B-61FECE3426DD
//! Partition table holds up to 128 entries
//! Main partition table begins at sector 2 and ends at sector 33
//! First usable sector is 2048, last usable sector is 2097118
//! Partitions will be aligned on 2048-sector boundaries
//! Total free space is 0 sectors (0 bytes)
//!
//! Number Start (sector) End (sector) Size Code Name
//! 1 2048 10239 4.0 MiB FFFF MayaMeta
//! 2 10240 2097118 1019.0 MiB FFFF MayaData
//! ```
//!
//! Notice how two partitions have been created when accessing the disk
//! when shared by the nexus:
//!
//! ```bash
//! $ mctl share gpt
//! "/dev/nbd0"
//!
//! TODO: also note how it complains about a MBR
//!
//! $ lsblk
//! NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
//! sda 8:0 0 50G 0 disk
//! ├─sda1 8:1 0 41.5G 0 part /
//! ├─sda2 8:2 0 7M 0 part [SWAP]
//! └─sda3 8:3 0 511M 0 part /boot
//! sr0 11:0 1 1024M 0 rom
//! nbd0 43:0 0 1019M 0 disk
//! nvme0n1 259:0 0 200G 0 disk /code
//!
//! The nbd0 zero device does not show the partitions
//! ```
use crate::bdev::nexus::Error;
use bincode::{deserialize_from, serialize};
use crc::{crc32, Hasher32};
use serde::{
de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor},
ser::{Serialize, SerializeTuple, Serializer},
};
use std::{
fmt::{self, Display},
io::Cursor,
};
use uuid::{self, parser};
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)]
/// based on RFC4122
pub struct GptGuid {
pub time_low: u32,
pub time_mid: u16,
pub time_high: u16,
pub node: [u8; 8],
}
impl std::str::FromStr for GptGuid {
type Err = parser::ParseError;
fn from_str(uuid: &str) -> Result<Self, Self::Err> {
let fields = uuid::Uuid::from_str(uuid)?;
let fields = fields.as_fields();
Ok(GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
})
}
}
impl std::fmt::Display for GptGuid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
uuid::Uuid::from_fields(
self.time_low,
self.time_mid,
self.time_high,
&self.node,
)
.unwrap() | impl GptGuid {
pub(crate) fn new_random() -> Self {
let fields = uuid::Uuid::new_v4();
let fields = fields.as_fields();
GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
}
}
}
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)]
pub struct GPTHeader {
/// GPT signature (must be "EFI PART").
pub signature: [u8; 8],
/// 00 00 01 00 up til version 2.17
pub revision: [u8; 4],
/// GPT header size (92 bytes)
pub header_size: u32,
/// CRC32 of the header.
pub self_checksum: u32,
pub reserved: [u8; 4],
/// primary lba where the header is located
pub lba_self: u64,
/// alternative lba where the header is located (backup)
pub lba_alt: u64,
/// first usable lba
pub lba_start: u64,
/// last usable lba
pub lba_end: u64,
/// 16 bytes representing the GUID of the GPT.
pub guid: GptGuid,
/// lba of where to find the partition table
pub lba_table: u64,
/// number of partitions, most tools set this to 128
pub num_entries: u32,
/// Size of element
pub entry_size: u32,
/// CRC32 checksum of the partition array.
pub table_crc: u32,
}
impl GPTHeader {
/// converts a slice into a gpt header and verifies the validity of the data
pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> {
let mut reader = Cursor::new(slice);
let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap();
if gpt.header_size != 92
|| gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54]
|| gpt.revision != [0x00, 0x00, 0x01, 0x00]
{
return Err(Error::Invalid);
}
let crc = gpt.self_checksum;
gpt.self_checksum = 0;
gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap());
if gpt.self_checksum != crc {
info!("GPT label crc mismatch");
return Err(Error::Invalid);
}
if gpt.lba_self > gpt.lba_alt {
std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt)
}
Ok(gpt)
}
/// checksum the header with the checksum field itself set to 0
pub fn checksum(&mut self) -> u32 {
self.self_checksum = 0;
self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap());
self.self_checksum
}
pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self {
let fields = guid.as_fields();
GPTHeader {
signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54],
revision: [0x00, 0x00, 0x01, 0x00],
header_size: 92,
self_checksum: 0,
reserved: [0; 4],
lba_self: 1,
lba_alt: num_blocks - 1,
lba_start: u64::from((1 << 20) / blk_size),
lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1,
guid: GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
},
lba_table: 2,
num_entries: 2,
entry_size: 128,
table_crc: 0,
}
}
pub fn to_backup(&self) -> Self {
let mut secondary = *self;
secondary.lba_self = self.lba_alt;
secondary.lba_alt = self.lba_self;
secondary.lba_table = self.lba_end + 1;
secondary
}
}
#[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)]
pub struct GptEntry {
/// GUID type, some of them are assigned/reserved for example to Linux
pub ent_type: GptGuid,
/// entry GUID, can be anything typically random
pub ent_guid: GptGuid,
/// start lba for this entry
pub ent_start: u64,
/// end lba for this entry
pub ent_end: u64,
/// entry attributes, according to do the docs bit 0 MUST be zero
pub ent_attr: u64,
/// utf16 name of the partition entry, do not confuse this fs labels!
pub ent_name: GptName,
}
impl GptEntry {
/// converts a slice into a partition array
pub fn from_slice(
slice: &[u8],
parts: u32,
) -> Result<Vec<GptEntry>, Error> {
let mut reader = Cursor::new(slice);
let mut part_vec = Vec::new();
// TODO 128 should be passed in as an argument
for _ in 0 .. parts {
part_vec.push(deserialize_from(&mut reader)?);
}
Ok(part_vec)
}
/// calculate the checksum over the partitions table
pub fn checksum(parts: &[GptEntry]) -> u32 {
let mut digest = crc32::Digest::new(crc32::IEEE);
for p in parts {
digest.write(&serialize(p).unwrap());
}
digest.sum32()
}
}
#[derive(Debug, PartialEq, Serialize, Clone)]
/// The nexus label is standard GPT label (such that you can use it without us
/// in the data path) The only thing that is really specific to us is the
/// ent_type GUID if we see that attached to a partition, we assume the data in
/// that partition is ours. In the data we will have more magic markers to
/// confirm the assumption but this is step one.
pub struct NexusLabel {
/// the main GPT header
pub primary: GPTHeader,
/// Vector of GPT entries where the first element is considered to be ours
pub partitions: Vec<GptEntry>,
}
impl NexusLabel {
/// returns the offset to the first data segment
pub(crate) fn offset(&self) -> u64 {
self.partitions[1].ent_start
}
/// returns the number of total blocks in this segment
pub(crate) fn num_blocks(&self) -> u64 {
self.partitions[1].ent_end - self.partitions[1].ent_start
}
}
impl Display for NexusLabel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "GUID: {}", self.primary.guid.to_string())?;
writeln!(f, "\tHeader crc32 {}", self.primary.self_checksum)?;
writeln!(f, "\tPartition table crc32 {}", self.primary.table_crc)?;
for i in 0 .. self.partitions.len() {
writeln!(f, "\tPartition number {}", i)?;
writeln!(f, "\tGUID: {}", self.partitions[i].ent_guid.to_string())?;
writeln!(
f,
"\tType GUID: {}",
self.partitions[i].ent_type.to_string()
)?;
writeln!(
f,
"\tLogical block start: {}, end: {}",
self.partitions[i].ent_start, self.partitions[i].ent_end
)?;
}
Ok(())
}
}
// for arrays bigger than 32 elements, things start to get unimplemented
// in terms of derive and what not. So we create a struct with a string
// and tell serde how to use it during (de)serializing
struct GpEntryNameVisitor;
impl<'de> Deserialize<'de> for GptName {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor)
}
}
impl Serialize for GptName {
fn serialize<S>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// we can't use serialize_tuple_struct here as we want exactly 72 bytes
let mut s = serializer.serialize_tuple(36)?;
let mut out: Vec<u16> = vec![0; 36];
for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) {
*o = i;
}
out.iter().for_each(|e| s.serialize_element(&e).unwrap());
s.end()
}
}
impl<'de> Visitor<'de> for GpEntryNameVisitor {
type Value = GptName;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Invalid GPT partition name")
}
fn visit_seq<A>(self, mut seq: A) -> std::result::Result<GptName, A::Error>
where
A: SeqAccess<'de>,
{
let mut out = Vec::new();
let mut end = false;
loop {
match seq.next_element()? {
Some(0) => {
end = true;
}
Some(e) if !end => out.push(e),
_ => break,
}
}
if end {
Ok(GptName {
name: String::from_utf16_lossy(&out),
})
} else {
Err(serde::de::Error::invalid_value(Unexpected::Seq, &self))
}
}
}
#[derive(Debug, PartialEq, Default, Clone)]
pub struct GptName {
pub name: String,
}
impl GptName {
pub fn as_str(&self) -> &str {
&self.name
}
} | .to_string()
)
}
}
| random_line_split |
nexus_label.rs | //! GPT labeling for Nexus devices. The primary partition
//! (/dev/x1) will be used for metadata during rebuild. The second
//! partition contains the file system.
//!
//! The nexus will adjust internal data structures to offset the IO to the
//! right partition. Put differently, when connecting to this device via
//! NVMF or iSCSI it will show up as a device with just one partition.
//!
//! When the nexus is removed from the data path and other initiators are
//! used, the data is still accessible; this removes us as a hard
//! dependency in the data path.
//!
//! # Example:
//!
//! ```bash
//! $ rm /code/disk1.img; truncate -s 1GiB /code/disk1.img
//! $ mctl create gpt -r aio:////code/disk1.img?blk_size=512 -s 1GiB -b
//! $ sgdisk -p /code/disk1.img
//! Found valid GPT with corrupt MBR; using GPT and will write new
//! protective MBR on save.
//! Disk /code//disk1.img: 2097152 sectors, 1024.0 MiB
//! Sector size (logical): 512 bytes
//! Disk identifier (GUID): EAB49A2F-EFEA-45E6-9A1B-61FECE3426DD
//! Partition table holds up to 128 entries
//! Main partition table begins at sector 2 and ends at sector 33
//! First usable sector is 2048, last usable sector is 2097118
//! Partitions will be aligned on 2048-sector boundaries
//! Total free space is 0 sectors (0 bytes)
//!
//! Number Start (sector) End (sector) Size Code Name
//! 1 2048 10239 4.0 MiB FFFF MayaMeta
//! 2 10240 2097118 1019.0 MiB FFFF MayaData
//! ```
//!
//! Notice how two partitions have been created when accessing the disk
//! when shared by the nexus:
//!
//! ```bash
//! $ mctl share gpt
//! "/dev/nbd0"
//!
//! TODO: also note how it complains about a MBR
//!
//! $ lsblk
//! NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
//! sda 8:0 0 50G 0 disk
//! ├─sda1 8:1 0 41.5G 0 part /
//! ├─sda2 8:2 0 7M 0 part [SWAP]
//! └─sda3 8:3 0 511M 0 part /boot
//! sr0 11:0 1 1024M 0 rom
//! nbd0 43:0 0 1019M 0 disk
//! nvme0n1 259:0 0 200G 0 disk /code
//!
//! The nbd0 zero device does not show the partitions
//! ```
use crate::bdev::nexus::Error;
use bincode::{deserialize_from, serialize};
use crc::{crc32, Hasher32};
use serde::{
de::{Deserialize, Deserializer, SeqAccess, Unexpected, Visitor},
ser::{Serialize, SerializeTuple, Serializer},
};
use std::{
fmt::{self, Display},
io::Cursor,
};
use uuid::{self, parser};
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Clone, Copy)]
/// based on RFC4122
pub struct GptGuid {
pub time_low: u32,
pub time_mid: u16,
pub time_high: u16,
pub node: [u8; 8],
}
impl std::str::FromStr for GptGuid {
type Err = parser::ParseError;
fn from_str(uuid: &str) -> Result<Self, Self::Err> {
let fields = uuid::Uuid::from_str(uuid)?;
let fields = fields.as_fields();
Ok(GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
})
}
}
impl std::fmt::Display for GptGuid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
uuid::Uuid::from_fields(
self.time_low,
self.time_mid,
self.time_high,
&self.node,
)
.unwrap()
.to_string()
)
}
}
impl GptGuid {
pub(crate) fn new_random() -> Self {
let fields = uuid::Uuid::new_v4();
let fields = fields.as_fields();
GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
}
}
}
#[derive(Debug, Deserialize, PartialEq, Default, Serialize, Copy, Clone)]
pub struct GPTHeader {
/// GPT signature (must be "EFI PART").
pub signature: [u8; 8],
/// 00 00 01 00 up til version 2.17
pub revision: [u8; 4],
/// GPT header size (92 bytes)
pub header_size: u32,
/// CRC32 of the header.
pub self_checksum: u32,
pub reserved: [u8; 4],
/// primary lba where the header is located
pub lba_self: u64,
/// alternative lba where the header is located (backup)
pub lba_alt: u64,
/// first usable lba
pub lba_start: u64,
/// last usable lba
pub lba_end: u64,
/// 16 bytes representing the GUID of the GPT.
pub guid: GptGuid,
/// lba of where to find the partition table
pub lba_table: u64,
/// number of partitions, most tools set this to 128
pub num_entries: u32,
/// Size of element
pub entry_size: u32,
/// CRC32 checksum of the partition array.
pub table_crc: u32,
}
impl GPTHeader {
/// converts a slice into a gpt header and verifies the validity of the data
pub fn from_slice(slice: &[u8]) -> Result<GPTHeader, Error> {
let mut reader = Cursor::new(slice);
let mut gpt: GPTHeader = deserialize_from(&mut reader).unwrap();
if gpt.header_size != 92
|| gpt.signature != [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54]
|| gpt.revision != [0x00, 0x00, 0x01, 0x00]
{
return Err(Error::Invalid);
}
let crc = gpt.self_checksum;
gpt.self_checksum = 0;
gpt.self_checksum = crc32::checksum_ieee(&serialize(&gpt).unwrap());
if gpt.self_checksum != crc {
info!("GPT label crc mismatch");
return Err(Error::Invalid);
}
if gpt.lba_self > gpt.lba_alt {
std::mem::swap(&mut gpt.lba_self, &mut gpt.lba_alt)
}
Ok(gpt)
}
/// checksum the header with the checksum field itself set to 0
pub fn checksum(&mut self) -> u32 {
self.self_checksum = 0;
self.self_checksum = crc32::checksum_ieee(&serialize(&self).unwrap());
self.self_checksum
}
pub fn new(blk_size: u32, num_blocks: u64, guid: uuid::Uuid) -> Self {
let fields = guid.as_fields();
GPTHeader {
signature: [0x45, 0x46, 0x49, 0x20, 0x50, 0x41, 0x52, 0x54],
revision: [0x00, 0x00, 0x01, 0x00],
header_size: 92,
self_checksum: 0,
reserved: [0; 4],
lba_self: 1,
lba_alt: num_blocks - 1,
lba_start: u64::from((1 << 20) / blk_size),
lba_end: ((num_blocks - 1) - u64::from((1 << 14) / blk_size)) - 1,
guid: GptGuid {
time_low: fields.0,
time_mid: fields.1,
time_high: fields.2,
node: *fields.3,
},
lba_table: 2,
num_entries: 2,
entry_size: 128,
table_crc: 0,
}
}
pub fn to_backup(&self) -> Self {
let mut secondary = *self;
secondary.lba_self = self.lba_alt;
secondary.lba_alt = self.lba_self;
secondary.lba_table = self.lba_end + 1;
secondary
}
}
#[derive(Debug, Default, PartialEq, Deserialize, Serialize, Clone)]
pub struct GptEntry {
/// GUID type, some of them are assigned/reserved for example to Linux
pub ent_type: GptGuid,
/// entry GUID, can be anything typically random
pub ent_guid: GptGuid,
/// start lba for this entry
pub ent_start: u64,
/// end lba for this entry
pub ent_end: u64,
/// entry attributes; according to the docs bit 0 MUST be zero
pub ent_attr: u64,
/// utf16 name of the partition entry, do not confuse this with fs labels!
pub ent_name: GptName,
}
impl GptEntry {
/// converts a slice into a partition array
pub fn from_slice(
slice: &[u8],
parts: u32,
) -> Result<Vec<GptEntry>, Error> {
let mut reader = Cursor::new(slice);
let mut part_vec = Vec::new();
// TODO 128 should be passed in as an argument
for _ in 0 .. parts {
part_vec.push(deserialize_from(&mut reader)?);
}
Ok(part_vec)
}
/// calculate the checksum over the partitions table
pub fn checksum(parts: &[GptEntry]) -> u32 {
let mut digest = crc32::Digest::new(crc32::IEEE);
for p in parts {
digest.write(&serialize(p).unwrap());
}
digest.sum32()
}
}
#[derive(Debug, PartialEq, Serialize, Clone)]
/// The nexus label is standard GPT label (such that you can use it without us
/// in the data path) The only thing that is really specific to us is the
/// ent_type GUID if we see that attached to a partition, we assume the data in
/// that partition is ours. In the data we will have more magic markers to
/// confirm the assumption but this is step one.
pub struct NexusLabel {
/// the main GPT header
pub primary: GPTHeader,
/// Vector of GPT entries where the first element is considered to be ours
pub partitions: Vec<GptEntry>,
}
impl NexusLabel {
/// returns the offset to the first data segment
pub(crate) fn offset(&self) -> u64 {
self.partitions[1].ent_start
}
/// returns the number of total blocks in this segment
pub(crate) fn num_blocks(&self) -> u64 {
self.partitions[1].ent_end - self.partitions[1].ent_start
}
}
impl Display for NexusLabel {
fn fmt(&self, f | mut fmt::Formatter) -> fmt::Result {
writeln!(f, "GUID: {}", self.primary.guid.to_string())?;
writeln!(f, "\tHeader crc32 {}", self.primary.self_checksum)?;
writeln!(f, "\tPartition table crc32 {}", self.primary.table_crc)?;
for i in 0 .. self.partitions.len() {
writeln!(f, "\tPartition number {}", i)?;
writeln!(f, "\tGUID: {}", self.partitions[i].ent_guid.to_string())?;
writeln!(
f,
"\tType GUID: {}",
self.partitions[i].ent_type.to_string()
)?;
writeln!(
f,
"\tLogical block start: {}, end: {}",
self.partitions[i].ent_start, self.partitions[i].ent_end
)?;
}
Ok(())
}
}
// for arrays bigger than 32 elements, things start to get unimplemented
// in terms of derive and what not. So we create a struct with a string
// and tell serde how to use it during (de)serializing
struct GpEntryNameVisitor;
impl<'de> Deserialize<'de> for GptName {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_tuple_struct("GptName", 36, GpEntryNameVisitor)
}
}
impl Serialize for GptName {
fn serialize<S>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
// we can't use serialize_tuple_struct here as we want exactly 72 bytes
let mut s = serializer.serialize_tuple(36)?;
let mut out: Vec<u16> = vec![0; 36];
for (i, o) in self.name.encode_utf16().zip(out.iter_mut()) {
*o = i;
}
out.iter().for_each(|e| s.serialize_element(&e).unwrap());
s.end()
}
}
impl<'de> Visitor<'de> for GpEntryNameVisitor {
type Value = GptName;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Invalid GPT partition name")
}
fn visit_seq<A>(self, mut seq: A) -> std::result::Result<GptName, A::Error>
where
A: SeqAccess<'de>,
{
let mut out = Vec::new();
let mut end = false;
loop {
match seq.next_element()? {
Some(0) => {
end = true;
}
Some(e) if !end => out.push(e),
_ => break,
}
}
if end {
Ok(GptName {
name: String::from_utf16_lossy(&out),
})
} else {
Err(serde::de::Error::invalid_value(Unexpected::Seq, &self))
}
}
}
#[derive(Debug, PartialEq, Default, Clone)]
pub struct GptName {
pub name: String,
}
impl GptName {
pub fn as_str(&self) -> &str {
&self.name
}
}
| : & | identifier_name |
jira-api.service.ts | import { Injectable } from '@angular/core';
import { nanoid } from 'nanoid';
import { ChromeExtensionInterfaceService } from '../../../../core/chrome-extension-interface/chrome-extension-interface.service';
import {
JIRA_ADDITIONAL_ISSUE_FIELDS,
JIRA_DATETIME_FORMAT,
JIRA_MAX_RESULTS,
JIRA_REQUEST_TIMEOUT_DURATION,
} from './jira.const';
import {
mapIssueResponse,
mapIssuesResponse,
mapResponse,
mapToSearchResults,
mapTransitionResponse,
} from './jira-issue/jira-issue-map.util';
import {
JiraOriginalStatus,
JiraOriginalTransition,
JiraOriginalUser,
} from './jira-api-responses';
import { JiraCfg } from './jira.model';
import { IPC } from '../../../../../../electron/shared-with-frontend/ipc-events.const';
import { SnackService } from '../../../../core/snack/snack.service';
import { HANDLED_ERROR_PROP_STR, IS_ELECTRON } from '../../../../app.constants';
import { Observable, of, throwError } from 'rxjs';
import { SearchResultItem } from '../../issue.model';
import {
catchError,
concatMap,
finalize,
first,
mapTo,
shareReplay,
take,
} from 'rxjs/operators';
import { JiraIssue, JiraIssueReduced } from './jira-issue/jira-issue.model';
import * as moment from 'moment';
import { BannerService } from '../../../../core/banner/banner.service';
import { BannerId } from '../../../../core/banner/banner.model';
import { T } from '../../../../t.const';
import { ElectronService } from '../../../../core/electron/electron.service';
import { stringify } from 'query-string';
import { fromPromise } from 'rxjs/internal-compatibility';
import { getErrorTxt } from '../../../../util/get-error-text';
import { isOnline } from '../../../../util/is-online';
import { GlobalProgressBarService } from '../../../../core-ui/global-progress-bar/global-progress-bar.service';
import { ipcRenderer, IpcRendererEvent } from 'electron';
import { SS } from '../../../../core/persistence/storage-keys.const';
import { MatDialog } from '@angular/material/dialog';
import { DialogPromptComponent } from '../../../../ui/dialog-prompt/dialog-prompt.component';
import { stripTrailing } from '../../../../util/strip-trailing';
const BLOCK_ACCESS_KEY = 'SUP_BLOCK_JIRA_ACCESS';
const API_VERSION = 'latest';
interface JiraRequestLogItem {
transform: (res: any, cfg: any) => any;
requestInit: RequestInit;
timeoutId: number;
jiraCfg: JiraCfg;
resolve(res: any): Promise<void>;
reject(reason?: any): Promise<unknown>;
}
interface JiraRequestCfg {
pathname: string;
followAllRedirects?: boolean;
method?: 'GET' | 'POST' | 'PUT';
query?: {
[key: string]: string | boolean | number | string[];
};
transform?: (res: any, jiraCfg?: JiraCfg) => any;
body?: Record<string, unknown>;
}
@Injectable({
providedIn: 'root',
})
export class JiraApiService {
private _requestsLog: { [key: string]: JiraRequestLogItem } = {};
private _isBlockAccess: boolean = !!sessionStorage.getItem(BLOCK_ACCESS_KEY);
private _isExtension: boolean = false;
private _isInterfacesReadyIfNeeded$: Observable<boolean> = IS_ELECTRON
? of(true).pipe()
: this._chromeExtensionInterfaceService.onReady$.pipe(mapTo(true), shareReplay(1));
constructor(
private _chromeExtensionInterfaceService: ChromeExtensionInterfaceService,
private _electronService: ElectronService,
private _globalProgressBarService: GlobalProgressBarService,
private _snackService: SnackService,
private _bannerService: BannerService,
private _matDialog: MatDialog,
) {
// set up callback listener for electron
if (IS_ELECTRON) {
(this._electronService.ipcRenderer as typeof ipcRenderer).on(
IPC.JIRA_CB_EVENT,
(ev: IpcRendererEvent, res: any) => {
this._handleResponse(res);
},
);
}
this._chromeExtensionInterfaceService.onReady$.subscribe(() => {
this._isExtension = true;
this._chromeExtensionInterfaceService.addEventListener(
'SP_JIRA_RESPONSE',
(ev: unknown, data: any) => {
this._handleResponse(data);
},
);
});
}
unblockAccess(): void {
this._isBlockAccess = false;
sessionStorage.removeItem(BLOCK_ACCESS_KEY);
}
issuePicker$(searchTerm: string, cfg: JiraCfg): Observable<SearchResultItem[]> {
const searchStr = `${searchTerm}`;
return this._sendRequest$({
jiraReqCfg: {
pathname: 'issue/picker',
followAllRedirects: true,
query: {
showSubTasks: true,
showSubTaskParent: true,
query: searchStr,
currentJQL: cfg.searchJqlQuery,
},
transform: mapToSearchResults,
// NOTE: we pass the cfg as well to avoid race conditions
},
cfg,
});
}
listFields$(cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: 'field',
},
cfg,
});
}
findAutoImportIssues$(
cfg: JiraCfg,
isFetchAdditional?: boolean,
maxResults: number = JIRA_MAX_RESULTS,
): Observable<JiraIssueReduced[]> {
const options = {
maxResults,
fields: [
...JIRA_ADDITIONAL_ISSUE_FIELDS,
...(cfg.storyPointFieldId ? [cfg.storyPointFieldId] : []),
],
};
const searchQuery = cfg.autoAddBacklogJqlQuery;
if (!searchQuery) {
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.NO_AUTO_IMPORT_JQL,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'JiraApi: No search query for auto import',
});
}
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssuesResponse as (res: any, cfg?: JiraCfg) => any,
pathname: 'search',
method: 'POST',
body: {
...options,
jql: searchQuery,
},
},
cfg,
});
}
getIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssue> {
return this._getIssueById$(issueId, cfg, true);
}
getReducedIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssueReduced> {
return this._getIssueById$(issueId, cfg, false);
}
getCurrentUser$(cfg: JiraCfg, isForce: boolean = false): Observable<JiraOriginalUser> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `myself`,
transform: mapResponse,
},
cfg,
isForce,
});
}
listStatus$(cfg: JiraCfg): Observable<JiraOriginalStatus[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `status`,
transform: mapResponse,
},
cfg,
});
}
getTransitionsForIssue$(
issueId: string,
cfg: JiraCfg,
): Observable<JiraOriginalTransition[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'GET',
query: {
expand: 'transitions.fields',
},
transform: mapTransitionResponse,
},
cfg,
});
}
transitionIssue$(issueId: string, transitionId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'POST',
body: {
transition: {
id: transitionId,
},
},
transform: mapResponse,
},
cfg,
});
}
updateAssignee$(issueId: string, accountId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/assignee`,
method: 'PUT',
body: {
accountId,
},
},
cfg,
});
}
addWorklog$({
issueId,
started,
timeSpent,
comment,
cfg,
}: {
issueId: string;
started: string;
timeSpent: number;
comment: string;
cfg: JiraCfg;
}): Observable<any> {
const worklog = {
started: moment(started).locale('en').format(JIRA_DATETIME_FORMAT),
timeSpentSeconds: Math.floor(timeSpent / 1000),
comment,
};
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/worklog`,
method: 'POST',
body: worklog,
transform: mapResponse,
},
cfg,
});
}
private _getIssueById$(
issueId: string,
cfg: JiraCfg,
isGetChangelog: boolean = false,
): Observable<JiraIssue> {
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssueResponse as (res: any, cfg?: JiraCfg) => any,
pathname: `issue/${issueId}`,
query: {
expand: isGetChangelog ? ['changelog', 'description'] : ['description'],
},
},
cfg,
});
}
// Complex Functions
// --------
private _isMinimalSettings(settings: JiraCfg): boolean {
return !!(
settings &&
settings.host &&
settings.userName &&
settings.password &&
(IS_ELECTRON || this._isExtension)
);
}
private _sendRequest$({
jiraReqCfg,
cfg,
isForce = false,
}: {
jiraReqCfg: JiraRequestCfg;
cfg: JiraCfg;
isForce?: boolean;
}): Observable<any> {
return this._isInterfacesReadyIfNeeded$.pipe(
take(1),
concatMap(() =>
IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true),
),
concatMap(() => {
// assign a uuid to the request so we know which response belongs to which promise
const requestId = `${jiraReqCfg.pathname}__${
jiraReqCfg.method || 'GET'
}__${nanoid()}`;
if (!isOnline()) {
this._snackService.open({
type: 'CUSTOM',
msg: T.G.NO_CON,
ico: 'cloud_off',
});
return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId });
}
if (!this._isMinimalSettings(cfg)) {
this._snackService.open({
type: 'ERROR',
msg:
!IS_ELECTRON && !this._isExtension
? T.F.JIRA.S.EXTENSION_NOT_LOADED
: T.F.JIRA.S.INSUFFICIENT_SETTINGS,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId,
});
}
if (this._isBlockAccess && !isForce) {
console.error('Blocked Jira Access to prevent being shut out');
this._bannerService.open({
id: BannerId.JiraUnblock,
msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG,
svgIco: 'jira',
action: {
label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK,
fn: () => this.unblockAccess(),
},
});
return throwError({
[HANDLED_ERROR_PROP_STR]:
'Blocked access to prevent being shut out ' + requestId,
});
}
// BUILD REQUEST START
// -------------------
const requestInit = this._makeRequestInit(jiraReqCfg, cfg);
const queryStr = jiraReqCfg.query
? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}`
: '';
const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`;
const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim();
return this._sendRequestToExecutor$(
requestId,
url,
requestInit,
jiraReqCfg.transform,
cfg,
);
// NOTE: offline is sexier & easier than cache, but in case we change our mind...
// const args = [requestId, url, requestInit, jiraReqCfg.transform];
// return this._issueCacheService.cache(url, requestInit, this._sendRequestToExecutor$.bind(this), args);
}),
);
}
private _sendRequestToExecutor$(
requestId: string,
url: string,
requestInit: RequestInit,
transform: any,
jiraCfg: JiraCfg,
): Observable<any> {
// TODO refactor to observable for request canceling etc
let promiseResolve;
let promiseReject;
const promise = new Promise((resolve, reject) => {
promiseResolve = resolve;
promiseReject = reject;
});
// save to request log (also sets up timeout)
this._requestsLog[requestId] = this._makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
});
const requestToSend = { requestId, requestInit, url };
if (this._electronService.isElectronApp) {
(this._electronService.ipcRenderer as typeof ipcRenderer).send(
IPC.JIRA_MAKE_REQUEST_EVENT,
{
...requestToSend,
jiraCfg,
},
);
} else if (this._isExtension) {
this._chromeExtensionInterfaceService.dispatchEvent(
'SP_JIRA_REQUEST',
requestToSend,
);
}
this._globalProgressBarService.countUp(url);
return fromPromise(promise).pipe(
catchError((err) => {
console.log(err);
console.log(getErrorTxt(err));
const errTxt = `Jira: ${getErrorTxt(err)}`;
this._snackService.open({ type: 'ERROR', msg: errTxt });
return throwError({ [HANDLED_ERROR_PROP_STR]: errTxt });
}),
first(),
finalize(() => this._globalProgressBarService.countDown()),
);
}
private _makeRequestInit(jr: JiraRequestCfg, cfg: JiraCfg): RequestInit {
return {
method: jr.method || 'GET',
...(jr.body ? { body: JSON.stringify(jr.body) } : {}),
headers: {
'Content-Type': 'application/json',
...(IS_ELECTRON && cfg.isWonkyCookieMode
? {
Cookie: sessionStorage.getItem(SS.JIRA_WONKY_COOKIE) as string,
}
: cfg.usePAT
? {
Cookie: '',
authorization: `Bearer ${cfg.password}`,
}
: {
Cookie: '',
authorization: `Basic ${this._b64EncodeUnicode(
`${cfg.userName}:${cfg.password}`,
)}`,
}),
},
};
}
private async _checkSetWonkyCookie(cfg: JiraCfg): Promise<string | null> {
const ssVal = sessionStorage.getItem(SS.JIRA_WONKY_COOKIE);
if (ssVal && ssVal.length > 0) | else {
const loginUrl = `${cfg.host}`;
const apiUrl = `${cfg.host}/rest/api/${API_VERSION}/myself`;
const val = await this._matDialog
.open(DialogPromptComponent, {
data: {
// TODO add message to translations
placeholder: 'Insert Cookie String',
message: `<h3>Jira Wonky Cookie Authentication</h3>
<ol>
<li><a href="${loginUrl}">Log into Jira from your browser</a></li>
<li><a href="${apiUrl}" target="_blank">Go to this api url</a></li>
<li>Open up the dev tools (Ctrl+Shift+i)</li>
<li>Navigate to the "Network" tab and reload page</li>
<li>Click the "myself" file on the left side.</li>
<li>In the "Headers" tab, scroll down and locate the "Request Headers" section.</li>
<li>Locate the "cookie" header and right click to copy the value</li>
<li>Fill this form with the cookie as "cookie: {paste-cookie-value}"</li>
</ol>`,
},
})
.afterClosed()
.toPromise();
if (typeof val === 'string') {
sessionStorage.setItem(SS.JIRA_WONKY_COOKIE, val);
return val;
}
}
this._blockAccess();
return null;
}
private _makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
}: {
promiseResolve: any;
promiseReject: any;
requestId: string;
requestInit: RequestInit;
transform: any;
jiraCfg: JiraCfg;
}): JiraRequestLogItem {
return {
transform,
resolve: promiseResolve,
reject: promiseReject,
// NOTE: only needed for debug
requestInit,
jiraCfg,
timeoutId: window.setTimeout(() => {
console.log('ERROR', 'Jira Request timed out', requestInit);
this._blockAccess();
// delete entry for promise
this._snackService.open({
msg: T.F.JIRA.S.TIMED_OUT,
type: 'ERROR',
});
this._requestsLog[requestId].reject('Request timed out');
delete this._requestsLog[requestId];
}, JIRA_REQUEST_TIMEOUT_DURATION),
};
}
private _handleResponse(res: { requestId?: string; error?: any }): void {
// check if proper id is given in callback and if exists in requestLog
if (res.requestId && this._requestsLog[res.requestId]) {
const currentRequest = this._requestsLog[res.requestId];
// cancel timeout for request
window.clearTimeout(currentRequest.timeoutId);
// resolve saved promise
if (!res || res.error) {
console.error('JIRA_RESPONSE_ERROR', res, currentRequest);
// let msg =
if (
res?.error &&
(res.error.statusCode === 401 ||
res.error === 401 ||
res.error.message === 'Forbidden' ||
res.error.message === 'Unauthorized')
) {
this._blockAccess();
}
currentRequest.reject(res);
} else {
// console.log('JIRA_RESPONSE', res);
if (currentRequest.transform) {
// data can be invalid, that's why we check
try {
currentRequest.resolve(currentRequest.transform(res, currentRequest.jiraCfg));
} catch (e) {
console.log(res);
console.log(currentRequest);
console.error(e);
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.INVALID_RESPONSE,
});
}
} else {
currentRequest.resolve(res);
}
}
// delete entry for promise afterwards
delete this._requestsLog[res.requestId];
} else {
console.warn('Jira: Response Request ID not existing', res && res.requestId);
}
}
private _blockAccess(): void {
// TODO also shut down all existing requests
this._isBlockAccess = true;
sessionStorage.setItem(BLOCK_ACCESS_KEY, 'true');
sessionStorage.removeItem(SS.JIRA_WONKY_COOKIE);
}
private _b64EncodeUnicode(str: string): string {
if (typeof (btoa as any) === 'function') {
return btoa(str);
}
throw new Error('Jira: btoa not supported');
}
}
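// Usage sketch (illustrative only): how a caller would log work for a task. The
// `api` and `jiraCfg` parameters and the concrete values are assumptions for this
// example; in the app this is driven by the Jira issue effects. Note that
// `timeSpent` is passed in milliseconds and converted to whole seconds by
// addWorklog$.
export const addWorklogExample = (
  api: JiraApiService,
  jiraCfg: JiraCfg,
): Observable<unknown> =>
  api.addWorklog$({
    issueId: '10001', // hypothetical issue id
    started: new Date().toISOString(),
    timeSpent: 90 * 60 * 1000, // 90 minutes in ms
    comment: 'worked on the task',
    cfg: jiraCfg,
  });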
| {
return ssVal;
} | conditional_block |
jira-api.service.ts | import { Injectable } from '@angular/core';
import { nanoid } from 'nanoid';
import { ChromeExtensionInterfaceService } from '../../../../core/chrome-extension-interface/chrome-extension-interface.service';
import {
JIRA_ADDITIONAL_ISSUE_FIELDS,
JIRA_DATETIME_FORMAT,
JIRA_MAX_RESULTS,
JIRA_REQUEST_TIMEOUT_DURATION,
} from './jira.const';
import {
mapIssueResponse,
mapIssuesResponse,
mapResponse,
mapToSearchResults,
mapTransitionResponse,
} from './jira-issue/jira-issue-map.util';
import {
JiraOriginalStatus,
JiraOriginalTransition,
JiraOriginalUser,
} from './jira-api-responses';
import { JiraCfg } from './jira.model';
import { IPC } from '../../../../../../electron/shared-with-frontend/ipc-events.const';
import { SnackService } from '../../../../core/snack/snack.service';
import { HANDLED_ERROR_PROP_STR, IS_ELECTRON } from '../../../../app.constants';
import { Observable, of, throwError } from 'rxjs';
import { SearchResultItem } from '../../issue.model';
import {
catchError,
concatMap,
finalize,
first,
mapTo,
shareReplay,
take,
} from 'rxjs/operators';
import { JiraIssue, JiraIssueReduced } from './jira-issue/jira-issue.model';
import * as moment from 'moment';
import { BannerService } from '../../../../core/banner/banner.service';
import { BannerId } from '../../../../core/banner/banner.model';
import { T } from '../../../../t.const';
import { ElectronService } from '../../../../core/electron/electron.service';
import { stringify } from 'query-string';
import { fromPromise } from 'rxjs/internal-compatibility';
import { getErrorTxt } from '../../../../util/get-error-text';
import { isOnline } from '../../../../util/is-online';
import { GlobalProgressBarService } from '../../../../core-ui/global-progress-bar/global-progress-bar.service';
import { ipcRenderer, IpcRendererEvent } from 'electron';
import { SS } from '../../../../core/persistence/storage-keys.const';
import { MatDialog } from '@angular/material/dialog';
import { DialogPromptComponent } from '../../../../ui/dialog-prompt/dialog-prompt.component';
import { stripTrailing } from '../../../../util/strip-trailing';
const BLOCK_ACCESS_KEY = 'SUP_BLOCK_JIRA_ACCESS';
const API_VERSION = 'latest';
interface JiraRequestLogItem {
transform: (res: any, cfg: any) => any;
requestInit: RequestInit;
timeoutId: number;
jiraCfg: JiraCfg;
resolve(res: any): Promise<void>;
reject(reason?: any): Promise<unknown>;
}
interface JiraRequestCfg {
pathname: string;
followAllRedirects?: boolean;
method?: 'GET' | 'POST' | 'PUT';
query?: {
[key: string]: string | boolean | number | string[];
};
transform?: (res: any, jiraCfg?: JiraCfg) => any;
body?: Record<string, unknown>;
}
@Injectable({
providedIn: 'root',
})
export class JiraApiService {
private _requestsLog: { [key: string]: JiraRequestLogItem } = {};
private _isBlockAccess: boolean = !!sessionStorage.getItem(BLOCK_ACCESS_KEY);
private _isExtension: boolean = false;
private _isInterfacesReadyIfNeeded$: Observable<boolean> = IS_ELECTRON
? of(true).pipe()
: this._chromeExtensionInterfaceService.onReady$.pipe(mapTo(true), shareReplay(1));
constructor(
private _chromeExtensionInterfaceService: ChromeExtensionInterfaceService,
private _electronService: ElectronService,
private _globalProgressBarService: GlobalProgressBarService,
private _snackService: SnackService,
private _bannerService: BannerService,
private _matDialog: MatDialog,
) {
// set up callback listener for electron
if (IS_ELECTRON) {
(this._electronService.ipcRenderer as typeof ipcRenderer).on(
IPC.JIRA_CB_EVENT,
(ev: IpcRendererEvent, res: any) => {
this._handleResponse(res);
},
);
}
this._chromeExtensionInterfaceService.onReady$.subscribe(() => {
this._isExtension = true;
this._chromeExtensionInterfaceService.addEventListener(
'SP_JIRA_RESPONSE',
(ev: unknown, data: any) => {
this._handleResponse(data);
},
);
});
}
| (): void {
this._isBlockAccess = false;
sessionStorage.removeItem(BLOCK_ACCESS_KEY);
}
issuePicker$(searchTerm: string, cfg: JiraCfg): Observable<SearchResultItem[]> {
const searchStr = `${searchTerm}`;
return this._sendRequest$({
jiraReqCfg: {
pathname: 'issue/picker',
followAllRedirects: true,
query: {
showSubTasks: true,
showSubTaskParent: true,
query: searchStr,
currentJQL: cfg.searchJqlQuery,
},
transform: mapToSearchResults,
// NOTE: we pass the cfg as well to avoid race conditions
},
cfg,
});
}
listFields$(cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: 'field',
},
cfg,
});
}
findAutoImportIssues$(
cfg: JiraCfg,
isFetchAdditional?: boolean,
maxResults: number = JIRA_MAX_RESULTS,
): Observable<JiraIssueReduced[]> {
const options = {
maxResults,
fields: [
...JIRA_ADDITIONAL_ISSUE_FIELDS,
...(cfg.storyPointFieldId ? [cfg.storyPointFieldId] : []),
],
};
const searchQuery = cfg.autoAddBacklogJqlQuery;
if (!searchQuery) {
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.NO_AUTO_IMPORT_JQL,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'JiraApi: No search query for auto import',
});
}
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssuesResponse as (res: any, cfg?: JiraCfg) => any,
pathname: 'search',
method: 'POST',
body: {
...options,
jql: searchQuery,
},
},
cfg,
});
}
getIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssue> {
return this._getIssueById$(issueId, cfg, true);
}
getReducedIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssueReduced> {
return this._getIssueById$(issueId, cfg, false);
}
getCurrentUser$(cfg: JiraCfg, isForce: boolean = false): Observable<JiraOriginalUser> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `myself`,
transform: mapResponse,
},
cfg,
isForce,
});
}
listStatus$(cfg: JiraCfg): Observable<JiraOriginalStatus[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `status`,
transform: mapResponse,
},
cfg,
});
}
getTransitionsForIssue$(
issueId: string,
cfg: JiraCfg,
): Observable<JiraOriginalTransition[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'GET',
query: {
expand: 'transitions.fields',
},
transform: mapTransitionResponse,
},
cfg,
});
}
transitionIssue$(issueId: string, transitionId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'POST',
body: {
transition: {
id: transitionId,
},
},
transform: mapResponse,
},
cfg,
});
}
updateAssignee$(issueId: string, accountId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/assignee`,
method: 'PUT',
body: {
accountId,
},
},
cfg,
});
}
addWorklog$({
issueId,
started,
timeSpent,
comment,
cfg,
}: {
issueId: string;
started: string;
timeSpent: number;
comment: string;
cfg: JiraCfg;
}): Observable<any> {
const worklog = {
started: moment(started).locale('en').format(JIRA_DATETIME_FORMAT),
timeSpentSeconds: Math.floor(timeSpent / 1000),
comment,
};
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/worklog`,
method: 'POST',
body: worklog,
transform: mapResponse,
},
cfg,
});
}
private _getIssueById$(
issueId: string,
cfg: JiraCfg,
isGetChangelog: boolean = false,
): Observable<JiraIssue> {
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssueResponse as (res: any, cfg?: JiraCfg) => any,
pathname: `issue/${issueId}`,
query: {
expand: isGetChangelog ? ['changelog', 'description'] : ['description'],
},
},
cfg,
});
}
// Complex Functions
// --------
private _isMinimalSettings(settings: JiraCfg): boolean {
return !!(
settings &&
settings.host &&
settings.userName &&
settings.password &&
(IS_ELECTRON || this._isExtension)
);
}
private _sendRequest$({
jiraReqCfg,
cfg,
isForce = false,
}: {
jiraReqCfg: JiraRequestCfg;
cfg: JiraCfg;
isForce?: boolean;
}): Observable<any> {
return this._isInterfacesReadyIfNeeded$.pipe(
take(1),
concatMap(() =>
IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true),
),
concatMap(() => {
// assign a uuid to the request so we know which response belongs to which promise
const requestId = `${jiraReqCfg.pathname}__${
jiraReqCfg.method || 'GET'
}__${nanoid()}`;
if (!isOnline()) {
this._snackService.open({
type: 'CUSTOM',
msg: T.G.NO_CON,
ico: 'cloud_off',
});
return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId });
}
if (!this._isMinimalSettings(cfg)) {
this._snackService.open({
type: 'ERROR',
msg:
!IS_ELECTRON && !this._isExtension
? T.F.JIRA.S.EXTENSION_NOT_LOADED
: T.F.JIRA.S.INSUFFICIENT_SETTINGS,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId,
});
}
if (this._isBlockAccess && !isForce) {
console.error('Blocked Jira Access to prevent being shut out');
this._bannerService.open({
id: BannerId.JiraUnblock,
msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG,
svgIco: 'jira',
action: {
label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK,
fn: () => this.unblockAccess(),
},
});
return throwError({
[HANDLED_ERROR_PROP_STR]:
'Blocked access to prevent being shut out ' + requestId,
});
}
// BUILD REQUEST START
// -------------------
const requestInit = this._makeRequestInit(jiraReqCfg, cfg);
const queryStr = jiraReqCfg.query
? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}`
: '';
const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`;
const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim();
return this._sendRequestToExecutor$(
requestId,
url,
requestInit,
jiraReqCfg.transform,
cfg,
);
// NOTE: offline is sexier & easier than cache, but in case we change our mind...
// const args = [requestId, url, requestInit, jiraReqCfg.transform];
// return this._issueCacheService.cache(url, requestInit, this._sendRequestToExecutor$.bind(this), args);
}),
);
}
private _sendRequestToExecutor$(
requestId: string,
url: string,
requestInit: RequestInit,
transform: any,
jiraCfg: JiraCfg,
): Observable<any> {
// TODO refactor to observable for request canceling etc
let promiseResolve;
let promiseReject;
const promise = new Promise((resolve, reject) => {
promiseResolve = resolve;
promiseReject = reject;
});
// save to request log (also sets up timeout)
this._requestsLog[requestId] = this._makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
});
const requestToSend = { requestId, requestInit, url };
if (this._electronService.isElectronApp) {
(this._electronService.ipcRenderer as typeof ipcRenderer).send(
IPC.JIRA_MAKE_REQUEST_EVENT,
{
...requestToSend,
jiraCfg,
},
);
} else if (this._isExtension) {
this._chromeExtensionInterfaceService.dispatchEvent(
'SP_JIRA_REQUEST',
requestToSend,
);
}
this._globalProgressBarService.countUp(url);
return fromPromise(promise).pipe(
catchError((err) => {
console.log(err);
console.log(getErrorTxt(err));
const errTxt = `Jira: ${getErrorTxt(err)}`;
this._snackService.open({ type: 'ERROR', msg: errTxt });
return throwError({ [HANDLED_ERROR_PROP_STR]: errTxt });
}),
first(),
finalize(() => this._globalProgressBarService.countDown()),
);
}
private _makeRequestInit(jr: JiraRequestCfg, cfg: JiraCfg): RequestInit {
return {
method: jr.method || 'GET',
...(jr.body ? { body: JSON.stringify(jr.body) } : {}),
headers: {
'Content-Type': 'application/json',
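// Exactly one auth strategy is applied below: the manually captured session cookie in wonky cookie mode,
// a Bearer token when a personal access token (usePAT) is configured, otherwise Basic auth from userName:password.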
...(IS_ELECTRON && cfg.isWonkyCookieMode
? {
Cookie: sessionStorage.getItem(SS.JIRA_WONKY_COOKIE) as string,
}
: cfg.usePAT
? {
Cookie: '',
authorization: `Bearer ${cfg.password}`,
}
: {
Cookie: '',
authorization: `Basic ${this._b64EncodeUnicode(
`${cfg.userName}:${cfg.password}`,
)}`,
}),
},
};
}
private async _checkSetWonkyCookie(cfg: JiraCfg): Promise<string | null> {
const ssVal = sessionStorage.getItem(SS.JIRA_WONKY_COOKIE);
if (ssVal && ssVal.length > 0) {
return ssVal;
} else {
const loginUrl = `${cfg.host}`;
const apiUrl = `${cfg.host}/rest/api/${API_VERSION}/myself`;
const val = await this._matDialog
.open(DialogPromptComponent, {
data: {
// TODO add message to translations
placeholder: 'Insert Cookie String',
message: `<h3>Jira Wonky Cookie Authentication</h3>
<ol>
<li><a href="${loginUrl}">Log into Jira from your browser</a></li>
<li><a href="${apiUrl}" target="_blank">Go to this api url</a></li>
<li>Open up the dev tools (Ctrl+Shift+i)</li>
<li>Navigate to the "Network" tab and reload page</li>
<li>Click the "myself" file on the left side.</li>
<li>In the "Headers" tab, scroll down and locate the "Request Headers" section.</li>
<li>Locate the "cookie" header and right click to copy the value</li>
<li>Fill this form with the cookie as "cookie: {paste-cookie-value}"</li>
</ol>`,
},
})
.afterClosed()
.toPromise();
if (typeof val === 'string') {
sessionStorage.setItem(SS.JIRA_WONKY_COOKIE, val);
return val;
}
}
this._blockAccess();
return null;
}
private _makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
}: {
promiseResolve: any;
promiseReject: any;
requestId: string;
requestInit: RequestInit;
transform: any;
jiraCfg: JiraCfg;
}): JiraRequestLogItem {
return {
transform,
resolve: promiseResolve,
reject: promiseReject,
// NOTE: only needed for debug
requestInit,
jiraCfg,
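// If no response arrives within JIRA_REQUEST_TIMEOUT_DURATION, reject the pending promise,
// remove it from the request log and block further requests.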
timeoutId: window.setTimeout(() => {
console.log('ERROR', 'Jira Request timed out', requestInit);
this._blockAccess();
// delete entry for promise
this._snackService.open({
msg: T.F.JIRA.S.TIMED_OUT,
type: 'ERROR',
});
this._requestsLog[requestId].reject('Request timed out');
delete this._requestsLog[requestId];
}, JIRA_REQUEST_TIMEOUT_DURATION),
};
}
private _handleResponse(res: { requestId?: string; error?: any }): void {
// check if a proper id is given in the callback and if it exists in the request log
if (res.requestId && this._requestsLog[res.requestId]) {
const currentRequest = this._requestsLog[res.requestId];
// cancel timeout for request
window.clearTimeout(currentRequest.timeoutId);
// resolve saved promise
if (!res || res.error) {
console.error('JIRA_RESPONSE_ERROR', res, currentRequest);
// let msg =
if (
res?.error &&
(res.error.statusCode === 401 ||
res.error === 401 ||
res.error.message === 'Forbidden' ||
res.error.message === 'Unauthorized')
) {
this._blockAccess();
}
currentRequest.reject(res);
} else {
// console.log('JIRA_RESPONSE', res);
if (currentRequest.transform) {
// data can be invalid, that's why we check
try {
currentRequest.resolve(currentRequest.transform(res, currentRequest.jiraCfg));
} catch (e) {
console.log(res);
console.log(currentRequest);
console.error(e);
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.INVALID_RESPONSE,
});
}
} else {
currentRequest.resolve(res);
}
}
// delete entry for promise afterwards
delete this._requestsLog[res.requestId];
} else {
console.warn('Jira: Response Request ID not existing', res && res.requestId);
}
}
private _blockAccess(): void {
// TODO also shut down all existing requests
this._isBlockAccess = true;
sessionStorage.setItem(BLOCK_ACCESS_KEY, 'true');
sessionStorage.removeItem(SS.JIRA_WONKY_COOKIE);
}
private _b64EncodeUnicode(str: string): string {
if (typeof (btoa as any) === 'function') {
return btoa(str);
}
throw new Error('Jira: btoa not supported');
}
}
| unblockAccess | identifier_name |
jira-api.service.ts | import { Injectable } from '@angular/core';
import { nanoid } from 'nanoid';
import { ChromeExtensionInterfaceService } from '../../../../core/chrome-extension-interface/chrome-extension-interface.service';
import {
JIRA_ADDITIONAL_ISSUE_FIELDS,
JIRA_DATETIME_FORMAT,
JIRA_MAX_RESULTS,
JIRA_REQUEST_TIMEOUT_DURATION,
} from './jira.const';
import {
mapIssueResponse,
mapIssuesResponse,
mapResponse,
mapToSearchResults,
mapTransitionResponse,
} from './jira-issue/jira-issue-map.util';
import {
JiraOriginalStatus,
JiraOriginalTransition,
JiraOriginalUser,
} from './jira-api-responses';
import { JiraCfg } from './jira.model';
import { IPC } from '../../../../../../electron/shared-with-frontend/ipc-events.const';
import { SnackService } from '../../../../core/snack/snack.service';
import { HANDLED_ERROR_PROP_STR, IS_ELECTRON } from '../../../../app.constants';
import { Observable, of, throwError } from 'rxjs';
import { SearchResultItem } from '../../issue.model';
import {
catchError,
concatMap,
finalize,
first,
mapTo,
shareReplay,
take,
} from 'rxjs/operators';
import { JiraIssue, JiraIssueReduced } from './jira-issue/jira-issue.model';
import * as moment from 'moment';
import { BannerService } from '../../../../core/banner/banner.service';
import { BannerId } from '../../../../core/banner/banner.model';
import { T } from '../../../../t.const';
import { ElectronService } from '../../../../core/electron/electron.service';
import { stringify } from 'query-string';
import { fromPromise } from 'rxjs/internal-compatibility';
import { getErrorTxt } from '../../../../util/get-error-text';
import { isOnline } from '../../../../util/is-online';
import { GlobalProgressBarService } from '../../../../core-ui/global-progress-bar/global-progress-bar.service';
import { ipcRenderer, IpcRendererEvent } from 'electron';
import { SS } from '../../../../core/persistence/storage-keys.const';
import { MatDialog } from '@angular/material/dialog';
import { DialogPromptComponent } from '../../../../ui/dialog-prompt/dialog-prompt.component';
import { stripTrailing } from '../../../../util/strip-trailing';
const BLOCK_ACCESS_KEY = 'SUP_BLOCK_JIRA_ACCESS';
const API_VERSION = 'latest';
interface JiraRequestLogItem {
transform: (res: any, cfg: any) => any;
requestInit: RequestInit;
timeoutId: number;
jiraCfg: JiraCfg;
resolve(res: any): Promise<void>;
reject(reason?: any): Promise<unknown>;
}
interface JiraRequestCfg {
pathname: string;
followAllRedirects?: boolean;
method?: 'GET' | 'POST' | 'PUT';
query?: {
[key: string]: string | boolean | number | string[];
};
transform?: (res: any, jiraCfg?: JiraCfg) => any;
body?: Record<string, unknown>;
}
@Injectable({
providedIn: 'root',
})
export class JiraApiService {
private _requestsLog: { [key: string]: JiraRequestLogItem } = {};
private _isBlockAccess: boolean = !!sessionStorage.getItem(BLOCK_ACCESS_KEY);
private _isExtension: boolean = false;
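// In Electron the IPC bridge is usable immediately; in the browser we wait for the
// Chrome extension interface to report readiness before any request is sent.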
private _isInterfacesReadyIfNeeded$: Observable<boolean> = IS_ELECTRON
? of(true).pipe()
: this._chromeExtensionInterfaceService.onReady$.pipe(mapTo(true), shareReplay(1));
constructor(
private _chromeExtensionInterfaceService: ChromeExtensionInterfaceService,
private _electronService: ElectronService,
private _globalProgressBarService: GlobalProgressBarService,
private _snackService: SnackService,
private _bannerService: BannerService,
private _matDialog: MatDialog,
) {
// set up callback listener for electron
if (IS_ELECTRON) {
(this._electronService.ipcRenderer as typeof ipcRenderer).on(
IPC.JIRA_CB_EVENT,
(ev: IpcRendererEvent, res: any) => {
this._handleResponse(res);
},
);
}
this._chromeExtensionInterfaceService.onReady$.subscribe(() => {
this._isExtension = true;
this._chromeExtensionInterfaceService.addEventListener(
'SP_JIRA_RESPONSE',
(ev: unknown, data: any) => {
this._handleResponse(data);
},
);
});
}
unblockAccess(): void {
this._isBlockAccess = false;
sessionStorage.removeItem(BLOCK_ACCESS_KEY);
}
issuePicker$(searchTerm: string, cfg: JiraCfg): Observable<SearchResultItem[]> {
const searchStr = `${searchTerm}`;
return this._sendRequest$({
jiraReqCfg: {
pathname: 'issue/picker',
followAllRedirects: true,
query: {
showSubTasks: true,
showSubTaskParent: true,
query: searchStr,
currentJQL: cfg.searchJqlQuery,
},
transform: mapToSearchResults,
// NOTE: we pass the cfg as well to avoid race conditions
},
cfg,
});
}
listFields$(cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: 'field',
},
cfg,
});
}
findAutoImportIssues$(
cfg: JiraCfg,
isFetchAdditional?: boolean,
maxResults: number = JIRA_MAX_RESULTS,
): Observable<JiraIssueReduced[]> |
getIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssue> {
return this._getIssueById$(issueId, cfg, true);
}
getReducedIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssueReduced> {
return this._getIssueById$(issueId, cfg, false);
}
getCurrentUser$(cfg: JiraCfg, isForce: boolean = false): Observable<JiraOriginalUser> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `myself`,
transform: mapResponse,
},
cfg,
isForce,
});
}
listStatus$(cfg: JiraCfg): Observable<JiraOriginalStatus[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `status`,
transform: mapResponse,
},
cfg,
});
}
getTransitionsForIssue$(
issueId: string,
cfg: JiraCfg,
): Observable<JiraOriginalTransition[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'GET',
query: {
expand: 'transitions.fields',
},
transform: mapTransitionResponse,
},
cfg,
});
}
transitionIssue$(issueId: string, transitionId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'POST',
body: {
transition: {
id: transitionId,
},
},
transform: mapResponse,
},
cfg,
});
}
updateAssignee$(issueId: string, accountId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/assignee`,
method: 'PUT',
body: {
accountId,
},
},
cfg,
});
}
addWorklog$({
issueId,
started,
timeSpent,
comment,
cfg,
}: {
issueId: string;
started: string;
timeSpent: number;
comment: string;
cfg: JiraCfg;
}): Observable<any> {
const worklog = {
started: moment(started).locale('en').format(JIRA_DATETIME_FORMAT),
timeSpentSeconds: Math.floor(timeSpent / 1000),
comment,
};
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/worklog`,
method: 'POST',
body: worklog,
transform: mapResponse,
},
cfg,
});
}
private _getIssueById$(
issueId: string,
cfg: JiraCfg,
isGetChangelog: boolean = false,
): Observable<JiraIssue> {
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssueResponse as (res: any, cfg?: JiraCfg) => any,
pathname: `issue/${issueId}`,
query: {
expand: isGetChangelog ? ['changelog', 'description'] : ['description'],
},
},
cfg,
});
}
// Complex Functions
// --------
private _isMinimalSettings(settings: JiraCfg): boolean {
return !!(
settings &&
settings.host &&
settings.userName &&
settings.password &&
(IS_ELECTRON || this._isExtension)
);
}
private _sendRequest$({
jiraReqCfg,
cfg,
isForce = false,
}: {
jiraReqCfg: JiraRequestCfg;
cfg: JiraCfg;
isForce?: boolean;
}): Observable<any> {
return this._isInterfacesReadyIfNeeded$.pipe(
take(1),
concatMap(() =>
IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true),
),
concatMap(() => {
// assign a uuid to each request so we know which response belongs to which promise
const requestId = `${jiraReqCfg.pathname}__${
jiraReqCfg.method || 'GET'
}__${nanoid()}`;
if (!isOnline()) {
this._snackService.open({
type: 'CUSTOM',
msg: T.G.NO_CON,
ico: 'cloud_off',
});
return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId });
}
if (!this._isMinimalSettings(cfg)) {
this._snackService.open({
type: 'ERROR',
msg:
!IS_ELECTRON && !this._isExtension
? T.F.JIRA.S.EXTENSION_NOT_LOADED
: T.F.JIRA.S.INSUFFICIENT_SETTINGS,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId,
});
}
if (this._isBlockAccess && !isForce) {
console.error('Blocked Jira Access to prevent being shut out');
this._bannerService.open({
id: BannerId.JiraUnblock,
msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG,
svgIco: 'jira',
action: {
label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK,
fn: () => this.unblockAccess(),
},
});
return throwError({
[HANDLED_ERROR_PROP_STR]:
'Blocked access to prevent being shut out ' + requestId,
});
}
// BUILD REQUEST START
// -------------------
const requestInit = this._makeRequestInit(jiraReqCfg, cfg);
const queryStr = jiraReqCfg.query
? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}`
: '';
const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`;
const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim();
return this._sendRequestToExecutor$(
requestId,
url,
requestInit,
jiraReqCfg.transform,
cfg,
);
// NOTE: offline is sexier & easier than cache, but in case we change our mind...
// const args = [requestId, url, requestInit, jiraReqCfg.transform];
// return this._issueCacheService.cache(url, requestInit, this._sendRequestToExecutor$.bind(this), args);
}),
);
}
private _sendRequestToExecutor$(
requestId: string,
url: string,
requestInit: RequestInit,
transform: any,
jiraCfg: JiraCfg,
): Observable<any> {
// TODO refactor to observable for request canceling etc
let promiseResolve;
let promiseReject;
const promise = new Promise((resolve, reject) => {
promiseResolve = resolve;
promiseReject = reject;
});
// save to request log (also sets up timeout)
this._requestsLog[requestId] = this._makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
});
const requestToSend = { requestId, requestInit, url };
if (this._electronService.isElectronApp) {
(this._electronService.ipcRenderer as typeof ipcRenderer).send(
IPC.JIRA_MAKE_REQUEST_EVENT,
{
...requestToSend,
jiraCfg,
},
);
} else if (this._isExtension) {
this._chromeExtensionInterfaceService.dispatchEvent(
'SP_JIRA_REQUEST',
requestToSend,
);
}
this._globalProgressBarService.countUp(url);
return fromPromise(promise).pipe(
catchError((err) => {
console.log(err);
console.log(getErrorTxt(err));
const errTxt = `Jira: ${getErrorTxt(err)}`;
this._snackService.open({ type: 'ERROR', msg: errTxt });
return throwError({ [HANDLED_ERROR_PROP_STR]: errTxt });
}),
first(),
finalize(() => this._globalProgressBarService.countDown()),
);
}
private _makeRequestInit(jr: JiraRequestCfg, cfg: JiraCfg): RequestInit {
return {
method: jr.method || 'GET',
...(jr.body ? { body: JSON.stringify(jr.body) } : {}),
headers: {
'Content-Type': 'application/json',
...(IS_ELECTRON && cfg.isWonkyCookieMode
? {
Cookie: sessionStorage.getItem(SS.JIRA_WONKY_COOKIE) as string,
}
: cfg.usePAT
? {
Cookie: '',
authorization: `Bearer ${cfg.password}`,
}
: {
Cookie: '',
authorization: `Basic ${this._b64EncodeUnicode(
`${cfg.userName}:${cfg.password}`,
)}`,
}),
},
};
}
private async _checkSetWonkyCookie(cfg: JiraCfg): Promise<string | null> {
const ssVal = sessionStorage.getItem(SS.JIRA_WONKY_COOKIE);
if (ssVal && ssVal.length > 0) {
return ssVal;
} else {
const loginUrl = `${cfg.host}`;
const apiUrl = `${cfg.host}/rest/api/${API_VERSION}/myself`;
const val = await this._matDialog
.open(DialogPromptComponent, {
data: {
// TODO add message to translations
placeholder: 'Insert Cookie String',
message: `<h3>Jira Wonky Cookie Authentication</h3>
<ol>
<li><a href="${loginUrl}">Log into Jira from your browser</a></li>
<li><a href="${apiUrl}" target="_blank">Go to this api url</a></li>
<li>Open up the dev tools (Ctrl+Shift+i)</li>
<li>Navigate to the "Network" tab and reload page</li>
<li>Click the "myself" file on the left side.</li>
<li>In the "Headers" tab, scroll down and locate the "Request Headers" section.</li>
<li>Locate the "cookie" header and right click to copy the value</li>
<li>Fill this form with the cookie as "cookie: {paste-cookie-value}"</li>
</ol>`,
},
})
.afterClosed()
.toPromise();
if (typeof val === 'string') {
sessionStorage.setItem(SS.JIRA_WONKY_COOKIE, val);
return val;
}
}
this._blockAccess();
return null;
}
private _makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
}: {
promiseResolve: any;
promiseReject: any;
requestId: string;
requestInit: RequestInit;
transform: any;
jiraCfg: JiraCfg;
}): JiraRequestLogItem {
return {
transform,
resolve: promiseResolve,
reject: promiseReject,
// NOTE: only needed for debug
requestInit,
jiraCfg,
timeoutId: window.setTimeout(() => {
console.log('ERROR', 'Jira Request timed out', requestInit);
this._blockAccess();
// delete entry for promise
this._snackService.open({
msg: T.F.JIRA.S.TIMED_OUT,
type: 'ERROR',
});
this._requestsLog[requestId].reject('Request timed out');
delete this._requestsLog[requestId];
}, JIRA_REQUEST_TIMEOUT_DURATION),
};
}
private _handleResponse(res: { requestId?: string; error?: any }): void {
// check if a proper id is given in the callback and if it exists in the request log
if (res.requestId && this._requestsLog[res.requestId]) {
const currentRequest = this._requestsLog[res.requestId];
// cancel timeout for request
window.clearTimeout(currentRequest.timeoutId);
// resolve saved promise
if (!res || res.error) {
console.error('JIRA_RESPONSE_ERROR', res, currentRequest);
// let msg =
if (
res?.error &&
(res.error.statusCode === 401 ||
res.error === 401 ||
res.error.message === 'Forbidden' ||
res.error.message === 'Unauthorized')
) {
this._blockAccess();
}
currentRequest.reject(res);
} else {
// console.log('JIRA_RESPONSE', res);
if (currentRequest.transform) {
// data can be invalid, that's why we check
try {
currentRequest.resolve(currentRequest.transform(res, currentRequest.jiraCfg));
} catch (e) {
console.log(res);
console.log(currentRequest);
console.error(e);
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.INVALID_RESPONSE,
});
}
} else {
currentRequest.resolve(res);
}
}
// delete entry for promise afterwards
delete this._requestsLog[res.requestId];
} else {
console.warn('Jira: Response Request ID not existing', res && res.requestId);
}
}
private _blockAccess(): void {
// TODO also shut down all existing requests
this._isBlockAccess = true;
sessionStorage.setItem(BLOCK_ACCESS_KEY, 'true');
sessionStorage.removeItem(SS.JIRA_WONKY_COOKIE);
}
private _b64EncodeUnicode(str: string): string {
if (typeof (btoa as any) === 'function') {
return btoa(str);
}
throw new Error('Jira: btoa not supported');
}
}
| {
const options = {
maxResults,
fields: [
...JIRA_ADDITIONAL_ISSUE_FIELDS,
...(cfg.storyPointFieldId ? [cfg.storyPointFieldId] : []),
],
};
const searchQuery = cfg.autoAddBacklogJqlQuery;
if (!searchQuery) {
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.NO_AUTO_IMPORT_JQL,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'JiraApi: No search query for auto import',
});
}
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssuesResponse as (res: any, cfg?: JiraCfg) => any,
pathname: 'search',
method: 'POST',
body: {
...options,
jql: searchQuery,
},
},
cfg,
});
} | identifier_body |
jira-api.service.ts | import { Injectable } from '@angular/core';
import { nanoid } from 'nanoid';
import { ChromeExtensionInterfaceService } from '../../../../core/chrome-extension-interface/chrome-extension-interface.service';
import {
JIRA_ADDITIONAL_ISSUE_FIELDS,
JIRA_DATETIME_FORMAT,
JIRA_MAX_RESULTS,
JIRA_REQUEST_TIMEOUT_DURATION,
} from './jira.const';
import {
mapIssueResponse,
mapIssuesResponse,
mapResponse,
mapToSearchResults,
mapTransitionResponse,
} from './jira-issue/jira-issue-map.util';
import {
JiraOriginalStatus,
JiraOriginalTransition,
JiraOriginalUser,
} from './jira-api-responses';
import { JiraCfg } from './jira.model';
import { IPC } from '../../../../../../electron/shared-with-frontend/ipc-events.const';
import { SnackService } from '../../../../core/snack/snack.service';
import { HANDLED_ERROR_PROP_STR, IS_ELECTRON } from '../../../../app.constants';
import { Observable, of, throwError } from 'rxjs';
import { SearchResultItem } from '../../issue.model';
import {
catchError,
concatMap,
finalize,
first,
mapTo,
shareReplay,
take,
} from 'rxjs/operators';
import { JiraIssue, JiraIssueReduced } from './jira-issue/jira-issue.model';
import * as moment from 'moment';
import { BannerService } from '../../../../core/banner/banner.service';
import { BannerId } from '../../../../core/banner/banner.model';
import { T } from '../../../../t.const';
import { ElectronService } from '../../../../core/electron/electron.service';
import { stringify } from 'query-string';
import { fromPromise } from 'rxjs/internal-compatibility';
import { getErrorTxt } from '../../../../util/get-error-text';
import { isOnline } from '../../../../util/is-online';
import { GlobalProgressBarService } from '../../../../core-ui/global-progress-bar/global-progress-bar.service';
import { ipcRenderer, IpcRendererEvent } from 'electron';
import { SS } from '../../../../core/persistence/storage-keys.const';
import { MatDialog } from '@angular/material/dialog';
import { DialogPromptComponent } from '../../../../ui/dialog-prompt/dialog-prompt.component';
import { stripTrailing } from '../../../../util/strip-trailing';
const BLOCK_ACCESS_KEY = 'SUP_BLOCK_JIRA_ACCESS';
const API_VERSION = 'latest';
interface JiraRequestLogItem {
transform: (res: any, cfg: any) => any;
requestInit: RequestInit;
timeoutId: number;
jiraCfg: JiraCfg;
resolve(res: any): Promise<void>;
reject(reason?: any): Promise<unknown>;
}
interface JiraRequestCfg {
pathname: string;
followAllRedirects?: boolean;
method?: 'GET' | 'POST' | 'PUT';
query?: {
[key: string]: string | boolean | number | string[];
};
transform?: (res: any, jiraCfg?: JiraCfg) => any;
body?: Record<string, unknown>;
}
@Injectable({
providedIn: 'root',
})
export class JiraApiService {
private _requestsLog: { [key: string]: JiraRequestLogItem } = {};
private _isBlockAccess: boolean = !!sessionStorage.getItem(BLOCK_ACCESS_KEY);
private _isExtension: boolean = false;
private _isInterfacesReadyIfNeeded$: Observable<boolean> = IS_ELECTRON
? of(true).pipe()
: this._chromeExtensionInterfaceService.onReady$.pipe(mapTo(true), shareReplay(1));
constructor(
private _chromeExtensionInterfaceService: ChromeExtensionInterfaceService,
private _electronService: ElectronService,
private _globalProgressBarService: GlobalProgressBarService,
private _snackService: SnackService,
private _bannerService: BannerService,
private _matDialog: MatDialog,
) {
// set up callback listener for electron
if (IS_ELECTRON) {
(this._electronService.ipcRenderer as typeof ipcRenderer).on(
IPC.JIRA_CB_EVENT,
(ev: IpcRendererEvent, res: any) => {
this._handleResponse(res);
},
);
}
this._chromeExtensionInterfaceService.onReady$.subscribe(() => {
this._isExtension = true;
this._chromeExtensionInterfaceService.addEventListener(
'SP_JIRA_RESPONSE',
(ev: unknown, data: any) => {
this._handleResponse(data);
},
);
});
}
unblockAccess(): void {
this._isBlockAccess = false;
sessionStorage.removeItem(BLOCK_ACCESS_KEY);
}
issuePicker$(searchTerm: string, cfg: JiraCfg): Observable<SearchResultItem[]> {
const searchStr = `${searchTerm}`;
return this._sendRequest$({
jiraReqCfg: {
pathname: 'issue/picker',
followAllRedirects: true,
query: {
showSubTasks: true,
showSubTaskParent: true,
query: searchStr,
currentJQL: cfg.searchJqlQuery,
},
transform: mapToSearchResults,
// NOTE: we pass the cfg as well to avoid race conditions
},
cfg,
});
}
listFields$(cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: 'field',
},
cfg,
});
}
findAutoImportIssues$(
cfg: JiraCfg,
isFetchAdditional?: boolean,
maxResults: number = JIRA_MAX_RESULTS,
): Observable<JiraIssueReduced[]> {
const options = {
maxResults,
fields: [
...JIRA_ADDITIONAL_ISSUE_FIELDS,
...(cfg.storyPointFieldId ? [cfg.storyPointFieldId] : []),
],
};
const searchQuery = cfg.autoAddBacklogJqlQuery;
if (!searchQuery) {
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.NO_AUTO_IMPORT_JQL,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'JiraApi: No search query for auto import',
});
}
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssuesResponse as (res: any, cfg?: JiraCfg) => any,
pathname: 'search',
method: 'POST',
body: {
...options,
jql: searchQuery,
},
},
cfg,
});
}
getIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssue> {
return this._getIssueById$(issueId, cfg, true);
}
getReducedIssueById$(issueId: string, cfg: JiraCfg): Observable<JiraIssueReduced> {
return this._getIssueById$(issueId, cfg, false);
}
getCurrentUser$(cfg: JiraCfg, isForce: boolean = false): Observable<JiraOriginalUser> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `myself`,
transform: mapResponse,
},
cfg,
isForce,
});
}
listStatus$(cfg: JiraCfg): Observable<JiraOriginalStatus[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `status`,
transform: mapResponse,
},
cfg,
});
}
getTransitionsForIssue$(
issueId: string,
cfg: JiraCfg,
): Observable<JiraOriginalTransition[]> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'GET',
query: {
expand: 'transitions.fields',
},
transform: mapTransitionResponse,
},
cfg,
});
}
transitionIssue$(issueId: string, transitionId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/transitions`,
method: 'POST',
body: {
transition: {
id: transitionId,
},
},
transform: mapResponse,
},
cfg,
});
}
updateAssignee$(issueId: string, accountId: string, cfg: JiraCfg): Observable<any> {
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/assignee`,
method: 'PUT',
body: {
accountId,
},
},
cfg,
});
}
addWorklog$({
issueId,
started,
timeSpent,
comment,
cfg,
}: {
issueId: string;
started: string;
timeSpent: number;
comment: string;
cfg: JiraCfg;
}): Observable<any> {
const worklog = {
started: moment(started).locale('en').format(JIRA_DATETIME_FORMAT),
timeSpentSeconds: Math.floor(timeSpent / 1000),
comment,
};
return this._sendRequest$({
jiraReqCfg: {
pathname: `issue/${issueId}/worklog`,
method: 'POST',
body: worklog,
transform: mapResponse,
},
cfg,
});
}
private _getIssueById$(
issueId: string,
cfg: JiraCfg,
isGetChangelog: boolean = false,
): Observable<JiraIssue> {
return this._sendRequest$({
jiraReqCfg: {
transform: mapIssueResponse as (res: any, cfg?: JiraCfg) => any,
pathname: `issue/${issueId}`,
query: {
expand: isGetChangelog ? ['changelog', 'description'] : ['description'],
},
},
cfg,
});
}
// Complex Functions
// --------
private _isMinimalSettings(settings: JiraCfg): boolean {
return !!(
settings &&
settings.host &&
settings.userName &&
settings.password &&
(IS_ELECTRON || this._isExtension)
);
}
private _sendRequest$({
jiraReqCfg,
cfg,
isForce = false,
}: {
jiraReqCfg: JiraRequestCfg;
cfg: JiraCfg;
isForce?: boolean;
}): Observable<any> {
return this._isInterfacesReadyIfNeeded$.pipe(
take(1),
concatMap(() =>
IS_ELECTRON && cfg.isWonkyCookieMode ? this._checkSetWonkyCookie(cfg) : of(true),
),
concatMap(() => {
// assign a uuid to each request so we know which response belongs to which promise
const requestId = `${jiraReqCfg.pathname}__${
jiraReqCfg.method || 'GET'
}__${nanoid()}`;
if (!isOnline()) {
this._snackService.open({
type: 'CUSTOM',
msg: T.G.NO_CON,
ico: 'cloud_off',
});
return throwError({ [HANDLED_ERROR_PROP_STR]: 'Jira Offline ' + requestId });
}
if (!this._isMinimalSettings(cfg)) {
this._snackService.open({
type: 'ERROR',
msg:
!IS_ELECTRON && !this._isExtension
? T.F.JIRA.S.EXTENSION_NOT_LOADED
: T.F.JIRA.S.INSUFFICIENT_SETTINGS,
});
return throwError({
[HANDLED_ERROR_PROP_STR]: 'Insufficient Settings for Jira ' + requestId,
});
}
if (this._isBlockAccess && !isForce) {
console.error('Blocked Jira Access to prevent being shut out');
this._bannerService.open({
id: BannerId.JiraUnblock,
msg: T.F.JIRA.BANNER.BLOCK_ACCESS_MSG,
svgIco: 'jira',
action: {
label: T.F.JIRA.BANNER.BLOCK_ACCESS_UNBLOCK,
fn: () => this.unblockAccess(),
},
});
return throwError({
[HANDLED_ERROR_PROP_STR]:
'Blocked access to prevent being shut out ' + requestId,
});
}
// BUILD REQUEST START
// -------------------
const requestInit = this._makeRequestInit(jiraReqCfg, cfg);
const queryStr = jiraReqCfg.query
? `?${stringify(jiraReqCfg.query, { arrayFormat: 'comma' })}`
: '';
const base = `${stripTrailing(cfg.host || 'null', '/')}/rest/api/${API_VERSION}`;
const url = `${base}/${jiraReqCfg.pathname}${queryStr}`.trim();
return this._sendRequestToExecutor$(
requestId,
url,
requestInit,
jiraReqCfg.transform,
cfg,
);
// NOTE: offline is sexier & easier than cache, but in case we change our mind...
// const args = [requestId, url, requestInit, jiraReqCfg.transform];
// return this._issueCacheService.cache(url, requestInit, this._sendRequestToExecutor$.bind(this), args);
}),
);
}
private _sendRequestToExecutor$(
requestId: string,
url: string,
requestInit: RequestInit,
transform: any,
jiraCfg: JiraCfg,
): Observable<any> {
// TODO refactor to observable for request canceling etc
let promiseResolve;
let promiseReject;
const promise = new Promise((resolve, reject) => {
promiseResolve = resolve;
promiseReject = reject;
});
// save to request log (also sets up timeout)
this._requestsLog[requestId] = this._makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
});
const requestToSend = { requestId, requestInit, url };
if (this._electronService.isElectronApp) {
(this._electronService.ipcRenderer as typeof ipcRenderer).send(
IPC.JIRA_MAKE_REQUEST_EVENT,
{
...requestToSend,
jiraCfg,
},
);
} else if (this._isExtension) {
this._chromeExtensionInterfaceService.dispatchEvent(
'SP_JIRA_REQUEST',
requestToSend,
);
}
this._globalProgressBarService.countUp(url);
return fromPromise(promise).pipe(
catchError((err) => {
console.log(err);
console.log(getErrorTxt(err));
const errTxt = `Jira: ${getErrorTxt(err)}`;
this._snackService.open({ type: 'ERROR', msg: errTxt });
return throwError({ [HANDLED_ERROR_PROP_STR]: errTxt });
}),
first(),
finalize(() => this._globalProgressBarService.countDown()),
);
}
private _makeRequestInit(jr: JiraRequestCfg, cfg: JiraCfg): RequestInit {
return {
method: jr.method || 'GET',
...(jr.body ? { body: JSON.stringify(jr.body) } : {}),
headers: {
'Content-Type': 'application/json',
...(IS_ELECTRON && cfg.isWonkyCookieMode
? {
Cookie: sessionStorage.getItem(SS.JIRA_WONKY_COOKIE) as string,
}
: cfg.usePAT
? {
Cookie: '',
authorization: `Bearer ${cfg.password}`,
}
: {
Cookie: '',
authorization: `Basic ${this._b64EncodeUnicode(
`${cfg.userName}:${cfg.password}`,
)}`,
}),
},
};
}
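// Returns the cookie string already stored for this session if present; otherwise prompts the
// user to paste one via a dialog and blocks access when nothing usable is provided.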
private async _checkSetWonkyCookie(cfg: JiraCfg): Promise<string | null> {
const ssVal = sessionStorage.getItem(SS.JIRA_WONKY_COOKIE);
if (ssVal && ssVal.length > 0) {
return ssVal;
} else {
const loginUrl = `${cfg.host}`;
const apiUrl = `${cfg.host}/rest/api/${API_VERSION}/myself`;
const val = await this._matDialog
.open(DialogPromptComponent, {
data: {
// TODO add message to translations
placeholder: 'Insert Cookie String',
message: `<h3>Jira Wonky Cookie Authentication</h3>
<ol>
<li><a href="${loginUrl}">Log into Jira from your browser</a></li>
<li><a href="${apiUrl}" target="_blank">Go to this api url</a></li>
<li>Open up the dev tools (Ctrl+Shift+i)</li>
<li>Navigate to the "Network" tab and reload page</li>
<li>Click the "myself" file on the left side.</li>
<li>In the "Headers" tab, scroll down and locate the "Request Headers" section.</li>
<li>Locate the "cookie" header and right click to copy the value</li>
<li>Fill this form with the cookie as "cookie: {paste-cookie-value}"</li>
</ol>`,
},
})
.afterClosed()
.toPromise();
if (typeof val === 'string') {
sessionStorage.setItem(SS.JIRA_WONKY_COOKIE, val);
return val;
}
}
this._blockAccess();
return null;
}
private _makeJiraRequestLogItem({
promiseResolve,
promiseReject,
requestId,
requestInit,
transform,
jiraCfg,
}: {
promiseResolve: any;
promiseReject: any;
requestId: string;
requestInit: RequestInit;
transform: any;
jiraCfg: JiraCfg;
}): JiraRequestLogItem {
return {
transform,
resolve: promiseResolve,
reject: promiseReject,
// NOTE: only needed for debug
requestInit,
jiraCfg,
timeoutId: window.setTimeout(() => {
console.log('ERROR', 'Jira Request timed out', requestInit);
this._blockAccess();
// delete entry for promise
this._snackService.open({
msg: T.F.JIRA.S.TIMED_OUT,
type: 'ERROR',
});
this._requestsLog[requestId].reject('Request timed out');
delete this._requestsLog[requestId];
}, JIRA_REQUEST_TIMEOUT_DURATION),
};
}
private _handleResponse(res: { requestId?: string; error?: any }): void {
// check if a proper id is given in the callback and if it exists in the request log
if (res.requestId && this._requestsLog[res.requestId]) {
const currentRequest = this._requestsLog[res.requestId];
// cancel timeout for request
window.clearTimeout(currentRequest.timeoutId);
// resolve saved promise
if (!res || res.error) {
console.error('JIRA_RESPONSE_ERROR', res, currentRequest);
// let msg =
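// Credential errors (401 / Forbidden / Unauthorized) block further access so repeated
// failing requests don't get the account shut out.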
if (
res?.error &&
(res.error.statusCode === 401 ||
res.error === 401 ||
res.error.message === 'Forbidden' ||
res.error.message === 'Unauthorized')
) {
this._blockAccess();
}
currentRequest.reject(res);
} else {
// console.log('JIRA_RESPONSE', res);
if (currentRequest.transform) {
// data can be invalid, that's why we check
try {
currentRequest.resolve(currentRequest.transform(res, currentRequest.jiraCfg));
} catch (e) {
console.log(res);
console.log(currentRequest);
console.error(e);
this._snackService.open({
type: 'ERROR',
msg: T.F.JIRA.S.INVALID_RESPONSE, | }
} else {
currentRequest.resolve(res);
}
}
// delete entry for promise afterwards
delete this._requestsLog[res.requestId];
} else {
console.warn('Jira: Response Request ID not existing', res && res.requestId);
}
}
private _blockAccess(): void {
// TODO also shut down all existing requests
this._isBlockAccess = true;
sessionStorage.setItem(BLOCK_ACCESS_KEY, 'true');
sessionStorage.removeItem(SS.JIRA_WONKY_COOKIE);
}
private _b64EncodeUnicode(str: string): string {
if (typeof (btoa as any) === 'function') {
return btoa(str);
}
throw new Error('Jira: btoo not supported');
}
} | }); | random_line_split |
home.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package user
import (
"bytes"
"fmt"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
activities_model "code.gitea.io/gitea/models/activities"
asymkey_model "code.gitea.io/gitea/models/asymkey"
"code.gitea.io/gitea/models/db"
issues_model "code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/context"
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/routers/web/feed"
context_service "code.gitea.io/gitea/services/context"
issue_service "code.gitea.io/gitea/services/issue"
pull_service "code.gitea.io/gitea/services/pull"
"github.com/keybase/go-crypto/openpgp"
"github.com/keybase/go-crypto/openpgp/armor"
"xorm.io/builder"
)
const (
tplDashboard base.TplName = "user/dashboard/dashboard"
tplIssues base.TplName = "user/dashboard/issues"
tplMilestones base.TplName = "user/dashboard/milestones"
tplProfile base.TplName = "user/profile"
)
// getDashboardContextUser finds out which context user's dashboard is being viewed.
func getDashboardContextUser(ctx *context.Context) *user_model.User {
ctxUser := ctx.Doer
orgName := ctx.Params(":org")
if len(orgName) > 0 {
ctxUser = ctx.Org.Organization.AsUser()
ctx.Data["Teams"] = ctx.Org.Teams
}
ctx.Data["ContextUser"] = ctxUser
orgs, err := organization.GetUserOrgsList(ctx.Doer)
if err != nil {
ctx.ServerError("GetUserOrgsList", err)
return nil
}
ctx.Data["Orgs"] = orgs
return ctxUser
}
// Dashboard renders the dashboard page
func Dashboard(ctx *context.Context) {
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
date = ctx.FormString("date")
page = ctx.FormInt("page")
)
// Make sure page number is at least 1. Will be posted to ctx.Data.
if page <= 1 {
page = 1
}
ctx.Data["Title"] = ctxUser.DisplayName() + " - " + ctx.Tr("dashboard")
ctx.Data["PageIsDashboard"] = true
ctx.Data["PageIsNews"] = true
cnt, _ := organization.GetOrganizationCount(ctx, ctxUser)
ctx.Data["UserOrgsCount"] = cnt
ctx.Data["MirrorsEnabled"] = setting.Mirror.Enabled
ctx.Data["Date"] = date
var uid int64
if ctxUser != nil {
uid = ctxUser.ID
}
ctx.PageData["dashboardRepoList"] = map[string]any{
"searchLimit": setting.UI.User.RepoPagingNum,
"uid": uid,
}
if setting.Service.EnableUserHeatmap {
data, err := activities_model.GetUserHeatmapDataByUserTeam(ctxUser, ctx.Org.Team, ctx.Doer)
if err != nil {
ctx.ServerError("GetUserHeatmapDataByUserTeam", err)
return
}
ctx.Data["HeatmapData"] = data
ctx.Data["HeatmapTotalContributions"] = activities_model.GetTotalContributionsInHeatmap(data)
}
feeds, count, err := activities_model.GetFeeds(ctx, activities_model.GetFeedsOptions{
RequestedUser: ctxUser,
RequestedTeam: ctx.Org.Team,
Actor: ctx.Doer,
IncludePrivate: true,
OnlyPerformedBy: false,
IncludeDeleted: false,
Date: ctx.FormString("date"),
ListOptions: db.ListOptions{
Page: page,
PageSize: setting.UI.FeedPagingNum,
},
})
if err != nil {
ctx.ServerError("GetFeeds", err)
return
}
ctx.Data["Feeds"] = feeds
pager := context.NewPagination(int(count), setting.UI.FeedPagingNum, page, 5)
pager.AddParam(ctx, "date", "Date")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplDashboard)
}
// Milestones renders the user milestones page
func Milestones(ctx *context.Context) |
// Pulls renders the user's pull request overview page
func Pulls(ctx *context.Context) {
if unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Pull request overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("pull_requests")
ctx.Data["PageIsPulls"] = true
ctx.Data["SingleRepoAction"] = "pull"
buildIssueOverview(ctx, unit.TypePullRequests)
}
// Issues renders the user's issues overview page
func Issues(ctx *context.Context) {
if unit.TypeIssues.UnitGlobalDisabled() {
log.Debug("Issues overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("issues")
ctx.Data["PageIsIssues"] = true
ctx.Data["SingleRepoAction"] = "issue"
buildIssueOverview(ctx, unit.TypeIssues)
}
// issueReposQueryPattern matches the "repos" query parameter: a bracketed, comma-separated list of repo IDs such as [1,2,3]
var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`)
func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
// ----------------------------------------------------
// Determine user; can be either user or organization.
// Return with NotFound or ServerError if unsuccessful.
// ----------------------------------------------------
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
viewType string
sortType = ctx.FormString("sort")
filterMode int
)
// Default to recently updated, unlike repository issues list
if sortType == "" {
sortType = "recentupdate"
}
// --------------------------------------------------------------------------------
// Distinguish User from Organization.
// Org:
// - Remember pre-determined viewType string for later. Will be posted to ctx.Data.
// Organization does not have view type and filter mode.
// User:
// - Use ctx.FormString("type") to determine filterMode.
// The type is set when clicking for example "assigned to me" on the overview page.
// - Remember either this or a fallback. Will be posted to ctx.Data.
// --------------------------------------------------------------------------------
// TODO: distinguish during routing
viewType = ctx.FormString("type")
switch viewType {
case "assigned":
filterMode = issues_model.FilterModeAssign
case "created_by":
filterMode = issues_model.FilterModeCreate
case "mentioned":
filterMode = issues_model.FilterModeMention
case "review_requested":
filterMode = issues_model.FilterModeReviewRequested
case "reviewed_by":
filterMode = issues_model.FilterModeReviewed
case "your_repositories":
fallthrough
default:
filterMode = issues_model.FilterModeYourRepositories
viewType = "your_repositories"
}
// --------------------------------------------------------------------------
// Build opts (IssuesOptions), which contains filter information.
// Will eventually be used to retrieve issues relevant for the overview page.
// Note: Non-final states of opts are used in-between, namely for:
// - Keyword search
// - Count Issues by repo
// --------------------------------------------------------------------------
// Get repository IDs where User/Org/Team has access.
var team *organization.Team
var org *organization.Organization
if ctx.Org != nil {
org = ctx.Org.Organization
team = ctx.Org.Team
}
isPullList := unitType == unit.TypePullRequests
opts := &issues_model.IssuesOptions{
IsPull: util.OptionalBoolOf(isPullList),
SortType: sortType,
IsArchived: util.OptionalBoolFalse,
Org: org,
Team: team,
User: ctx.Doer,
}
// Search all repositories which
//
// As user:
// - Owns the repository.
// - Have collaborator permissions in repository.
//
// As org:
// - Owns the repository.
//
// As team:
// - Team's org owns the repository.
// - Team has read permission to repository.
repoOpts := &repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false,
AllLimited: false,
Collaborate: util.OptionalBoolNone,
UnitType: unitType,
Archived: util.OptionalBoolFalse,
}
if team != nil {
repoOpts.TeamID = team.ID
}
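// Collect the IDs of every repository the doer can access; they scope the indexer queries
// below and are later used to filter the repo IDs selected in the UI.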
accessibleRepos := container.Set[int64]{}
{
ids, _, err := repo_model.SearchRepositoryIDs(repoOpts)
if err != nil {
ctx.ServerError("SearchRepositoryIDs", err)
return
}
accessibleRepos.AddMultiple(ids...)
opts.RepoIDs = ids
if len(opts.RepoIDs) == 0 {
// no repos found, don't let the indexer return all repos
opts.RepoIDs = []int64{0}
}
}
switch filterMode {
case issues_model.FilterModeAll:
case issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
opts.AssigneeID = ctx.Doer.ID
case issues_model.FilterModeCreate:
opts.PosterID = ctx.Doer.ID
case issues_model.FilterModeMention:
opts.MentionedID = ctx.Doer.ID
case issues_model.FilterModeReviewRequested:
opts.ReviewRequestedID = ctx.Doer.ID
case issues_model.FilterModeReviewed:
opts.ReviewedID = ctx.Doer.ID
}
// keyword holds the search term entered into the search field.
keyword := strings.Trim(ctx.FormString("q"), " ")
ctx.Data["Keyword"] = keyword
// Educated guess: Do or don't show closed issues.
isShowClosed := ctx.FormString("state") == "closed"
opts.IsClosed = util.OptionalBoolOf(isShowClosed)
// Filter repos and count issues in them. Count will be used later.
// USING NON-FINAL STATE OF opts FOR A QUERY.
issueCountByRepo, err := issue_indexer.CountIssuesByRepo(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("CountIssuesByRepo", err)
return
}
// Make sure page number is at least 1. Will be posted to ctx.Data.
page := ctx.FormInt("page")
if page <= 1 {
page = 1
}
opts.Paginator = &db.ListOptions{
Page: page,
PageSize: setting.UI.IssuePagingNum,
}
// Get IDs for labels (a filter option for issues/pulls).
// Required for IssuesOptions.
var labelIDs []int64
selectedLabels := ctx.FormString("labels")
if len(selectedLabels) > 0 && selectedLabels != "0" {
var err error
labelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
if err != nil {
ctx.ServerError("StringsToInt64s", err)
return
}
}
opts.LabelIDs = labelIDs
// Parse ctx.FormString("repos") and remember matched repo IDs for later.
// Gets set when clicking filters on the issues overview page.
selectedRepoIDs := getRepoIDs(ctx.FormString("repos"))
// Remove repo IDs that are not accessible to the user.
selectedRepoIDs = util.SliceRemoveAllFunc(selectedRepoIDs, func(v int64) bool {
return !accessibleRepos.Contains(v)
})
if len(selectedRepoIDs) > 0 {
opts.RepoIDs = selectedRepoIDs
}
// ------------------------------
// Get issues as defined by opts.
// ------------------------------
// Slice of Issues that will be displayed on the overview page
// USING FINAL STATE OF opts FOR A QUERY.
var issues issues_model.IssueList
{
issueIDs, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("issueIDsFromSearch", err)
return
}
issues, err = issues_model.GetIssuesByIDs(ctx, issueIDs, true)
if err != nil {
ctx.ServerError("GetIssuesByIDs", err)
return
}
}
// ----------------------------------
// Add repository pointers to Issues.
// ----------------------------------
// Remove repositories that should not be shown,
// which are repositories that have no issues and are not selected by the user.
selectedRepos := container.SetOf(selectedRepoIDs...)
for k, v := range issueCountByRepo {
if v == 0 && !selectedRepos.Contains(k) {
delete(issueCountByRepo, k)
}
}
// showReposMap maps repository IDs to their Repository pointers.
showReposMap, err := loadRepoByIDs(ctxUser, issueCountByRepo, unitType)
if err != nil {
if repo_model.IsErrRepoNotExist(err) {
ctx.NotFound("GetRepositoryByID", err)
return
}
ctx.ServerError("loadRepoByIDs", err)
return
}
// a RepositoryList
showRepos := repo_model.RepositoryListOfMap(showReposMap)
sort.Sort(showRepos)
for _, issue := range issues {
if issue.Repo == nil {
issue.Repo = showReposMap[issue.RepoID]
}
}
// maps pull request IDs to their CommitStatus. Will be posted to ctx.Data.
commitStatuses, lastStatus, err := pull_service.GetIssuesAllCommitStatus(ctx, issues)
if err != nil {
ctx.ServerError("GetIssuesLastCommitStatus", err)
return
}
// -------------------------------
// Fill stats to post to ctx.Data.
// -------------------------------
issueStats, err := getUserIssueStats(ctx, filterMode, issue_indexer.ToSearchOptions(keyword, opts), ctx.Doer.ID)
if err != nil {
ctx.ServerError("getUserIssueStats", err)
return
}
// Will be posted to ctx.Data.
var shownIssues int
if !isShowClosed {
shownIssues = int(issueStats.OpenCount)
} else {
shownIssues = int(issueStats.ClosedCount)
}
if len(opts.RepoIDs) != 0 {
shownIssues = 0
for _, repoID := range opts.RepoIDs {
shownIssues += int(issueCountByRepo[repoID])
}
}
var allIssueCount int64
for _, issueCount := range issueCountByRepo {
allIssueCount += issueCount
}
ctx.Data["TotalIssueCount"] = allIssueCount
if len(opts.RepoIDs) == 1 {
repo := showReposMap[opts.RepoIDs[0]]
if repo != nil {
ctx.Data["SingleRepoLink"] = repo.Link()
}
}
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["IssueRefEndNames"], ctx.Data["IssueRefURLs"] = issue_service.GetRefEndNamesAndURLs(issues, ctx.FormString("RepoLink"))
if err := issues.LoadAttributes(ctx); err != nil {
ctx.ServerError("issues.LoadAttributes", err)
return
}
ctx.Data["Issues"] = issues
approvalCounts, err := issues.GetApprovalCounts(ctx)
if err != nil {
ctx.ServerError("ApprovalCounts", err)
return
}
ctx.Data["ApprovalCounts"] = func(issueID int64, typ string) int64 {
counts, ok := approvalCounts[issueID]
if !ok || len(counts) == 0 {
return 0
}
reviewTyp := issues_model.ReviewTypeApprove
if typ == "reject" {
reviewTyp = issues_model.ReviewTypeReject
} else if typ == "waiting" {
reviewTyp = issues_model.ReviewTypeRequest
}
for _, count := range counts {
if count.Type == reviewTyp {
return count.Count
}
}
return 0
}
ctx.Data["CommitLastStatus"] = lastStatus
ctx.Data["CommitStatuses"] = commitStatuses
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = issueCountByRepo
ctx.Data["IssueStats"] = issueStats
ctx.Data["ViewType"] = viewType
ctx.Data["SortType"] = sortType
ctx.Data["RepoIDs"] = selectedRepoIDs
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["SelectLabels"] = selectedLabels
if isShowClosed {
ctx.Data["State"] = "closed"
} else {
ctx.Data["State"] = "open"
}
// Convert []int64 to string
reposParam, _ := json.Marshal(opts.RepoIDs)
ctx.Data["ReposParam"] = string(reposParam)
pager := context.NewPagination(shownIssues, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "type", "ViewType")
pager.AddParam(ctx, "repos", "ReposParam")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
pager.AddParam(ctx, "labels", "SelectLabels")
pager.AddParam(ctx, "milestone", "MilestoneID")
pager.AddParam(ctx, "assignee", "AssigneeID")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplIssues)
}
func getRepoIDs(reposQuery string) []int64 {
if len(reposQuery) == 0 || reposQuery == "[]" {
return []int64{}
}
if !issueReposQueryPattern.MatchString(reposQuery) {
log.Warn("issueReposQueryPattern does not match query: %q", reposQuery)
return []int64{}
}
var repoIDs []int64
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// for each ID (delimiter ","), parse it to an int and add it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
return repoIDs
}
func loadRepoByIDs(ctxUser *user_model.User, issueCountByRepo map[int64]int64, unitType unit.Type) (map[int64]*repo_model.Repository, error) {
totalRes := make(map[int64]*repo_model.Repository, len(issueCountByRepo))
repoIDs := make([]int64, 0, 500)
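// Look the repositories up in batches of 500 IDs, flushing whenever the batch fills up
// and once more for the remainder.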
for id := range issueCountByRepo {
if id <= 0 {
continue
}
repoIDs = append(repoIDs, id)
if len(repoIDs) == 500 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
repoIDs = repoIDs[:0]
}
}
if len(repoIDs) > 0 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
}
return totalRes, nil
}
// ShowSSHKeys outputs all the SSH keys of the user by uid
func ShowSSHKeys(ctx *context.Context) {
keys, err := asymkey_model.ListPublicKeys(ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListPublicKeys", err)
return
}
var buf bytes.Buffer
for i := range keys {
buf.WriteString(keys[i].OmitEmail())
buf.WriteString("\n")
}
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
// ShowGPGKeys outputs all the public GPG keys of the user by uid
func ShowGPGKeys(ctx *context.Context) {
keys, err := asymkey_model.ListGPGKeys(ctx, ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListGPGKeys", err)
return
}
entities := make([]*openpgp.Entity, 0)
failedEntitiesID := make([]string, 0)
for _, k := range keys {
e, err := asymkey_model.GPGKeyToEntity(k)
if err != nil {
if asymkey_model.IsErrGPGKeyImportNotExist(err) {
failedEntitiesID = append(failedEntitiesID, k.KeyID)
continue // Skip previous import without backup of imported armored key
}
ctx.ServerError("ShowGPGKeys", err)
return
}
entities = append(entities, e)
}
var buf bytes.Buffer
headers := make(map[string]string)
if len(failedEntitiesID) > 0 { // If some key need re-import to be exported
headers["Note"] = fmt.Sprintf("The keys with the following IDs couldn't be exported and need to be reuploaded %s", strings.Join(failedEntitiesID, ", "))
} else if len(entities) == 0 {
headers["Note"] = "This user hasn't uploaded any GPG keys."
}
writer, _ := armor.Encode(&buf, "PGP PUBLIC KEY BLOCK", headers)
for _, e := range entities {
err = e.Serialize(writer) // TODO find out why keys are exported with a different cipherTypeByte than the original (should not be blocking but strange)
if err != nil {
ctx.ServerError("ShowGPGKeys", err)
return
}
}
writer.Close()
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
func UsernameSubRoute(ctx *context.Context) {
// WORKAROUND to support usernames with "." in it
// https://github.com/go-chi/chi/issues/781
username := ctx.Params("username")
reloadParam := func(suffix string) (success bool) {
ctx.SetParams("username", strings.TrimSuffix(username, suffix))
context_service.UserAssignmentWeb()(ctx)
return !ctx.Written()
}
switch {
case strings.HasSuffix(username, ".png"):
if reloadParam(".png") {
AvatarByUserName(ctx)
}
case strings.HasSuffix(username, ".keys"):
if reloadParam(".keys") {
ShowSSHKeys(ctx)
}
case strings.HasSuffix(username, ".gpg"):
if reloadParam(".gpg") {
ShowGPGKeys(ctx)
}
case strings.HasSuffix(username, ".rss"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".rss") {
context_service.UserAssignmentWeb()(ctx)
feed.ShowUserFeedRSS(ctx)
}
case strings.HasSuffix(username, ".atom"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".atom") {
feed.ShowUserFeedAtom(ctx)
}
default:
context_service.UserAssignmentWeb()(ctx)
if !ctx.Written() {
ctx.Data["EnableFeed"] = setting.Other.EnableFeed
OwnerProfile(ctx)
}
}
}
func getUserIssueStats(ctx *context.Context, filterMode int, opts *issue_indexer.SearchOptions, doerID int64) (*issues_model.IssueStats, error) {
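// Work on a copy with every doer-specific filter cleared; each count below re-applies
// only the single filter it measures.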
opts = opts.Copy(func(o *issue_indexer.SearchOptions) {
o.AssigneeID = nil
o.PosterID = nil
o.MentionID = nil
o.ReviewRequestedID = nil
o.ReviewedID = nil
})
var (
err error
ret = &issues_model.IssueStats{}
)
{
openClosedOpts := opts.Copy()
switch filterMode {
case issues_model.FilterModeAll, issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
openClosedOpts.AssigneeID = &doerID
case issues_model.FilterModeCreate:
openClosedOpts.PosterID = &doerID
case issues_model.FilterModeMention:
openClosedOpts.MentionID = &doerID
case issues_model.FilterModeReviewRequested:
openClosedOpts.ReviewRequestedID = &doerID
case issues_model.FilterModeReviewed:
openClosedOpts.ReviewedID = &doerID
}
openClosedOpts.IsClosed = util.OptionalBoolFalse
ret.OpenCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
openClosedOpts.IsClosed = util.OptionalBoolTrue
ret.ClosedCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
}
ret.YourRepositoriesCount, err = issue_indexer.CountIssues(ctx, opts)
if err != nil {
return nil, err
}
ret.AssignCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AssigneeID = &doerID }))
if err != nil {
return nil, err
}
ret.CreateCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.PosterID = &doerID }))
if err != nil {
return nil, err
}
ret.MentionCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.MentionID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewRequestedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewRequestedID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewedID = &doerID }))
if err != nil {
return nil, err
}
return ret, nil
}
| {
if unit.TypeIssues.UnitGlobalDisabled() && unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Milestones overview page not available as both issues and pull requests are globally disabled")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("milestones")
ctx.Data["PageIsMilestonesDashboard"] = true
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
repoOpts := repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false, // Do not include all public repositories of users and public organisations
AllLimited: false, // Do not include all public repositories of limited organisations
Archived: util.OptionalBoolFalse,
HasMilestones: util.OptionalBoolTrue, // Only display repositories that have milestones
}
if ctxUser.IsOrganization() && ctx.Org.Team != nil {
repoOpts.TeamID = ctx.Org.Team.ID
}
var (
userRepoCond = repo_model.SearchRepositoryCondition(&repoOpts) // all repo condition user could visit
repoCond = userRepoCond
repoIDs []int64
reposQuery = ctx.FormString("repos")
isShowClosed = ctx.FormString("state") == "closed"
sortType = ctx.FormString("sort")
page = ctx.FormInt("page")
keyword = ctx.FormTrim("q")
)
if page <= 1 {
page = 1
}
if len(reposQuery) != 0 {
if issueReposQueryPattern.MatchString(reposQuery) {
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// parse each comma-separated ID and append it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
// If the repo id specified by query is not parseable or not accessible by user, just ignore it.
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
if len(repoIDs) > 0 {
// Don't just set repoCond = builder.In("id", repoIDs), because the user may not have permission on all of the requested repoIDs.
// Instead, narrow the user-visible repository condition down to the requested IDs.
repoCond = repoCond.And(builder.In("id", repoIDs))
}
} else {
log.Warn("issueReposQueryPattern not match with query")
}
}
counts, err := issues_model.CountMilestonesByRepoCondAndKw(userRepoCond, keyword, isShowClosed)
if err != nil {
ctx.ServerError("CountMilestonesByRepoIDs", err)
return
}
milestones, err := issues_model.SearchMilestones(repoCond, page, isShowClosed, sortType, keyword)
if err != nil {
ctx.ServerError("SearchMilestones", err)
return
}
showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false)
if err != nil {
ctx.ServerError("SearchRepositoryByCondition", err)
return
}
sort.Sort(showRepos)
for i := 0; i < len(milestones); {
for _, repo := range showRepos {
if milestones[i].RepoID == repo.ID {
milestones[i].Repo = repo
break
}
}
if milestones[i].Repo == nil {
log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID)
milestones = append(milestones[:i], milestones[i+1:]...)
continue
}
milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{
URLPrefix: milestones[i].Repo.Link(),
Metas: milestones[i].Repo.ComposeMetas(),
Ctx: ctx,
}, milestones[i].Content)
if err != nil {
ctx.ServerError("RenderString", err)
return
}
if milestones[i].Repo.IsTimetrackerEnabled(ctx) {
err := milestones[i].LoadTotalTrackedTime()
if err != nil {
ctx.ServerError("LoadTotalTrackedTime", err)
return
}
}
i++
}
milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
var totalMilestoneStats *issues_model.MilestonesStats
if len(repoIDs) == 0 {
totalMilestoneStats = milestoneStats
} else {
totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
}
showRepoIds := make(container.Set[int64], len(showRepos))
for _, repo := range showRepos {
if repo.ID > 0 {
showRepoIds.Add(repo.ID)
}
}
if len(repoIDs) == 0 {
repoIDs = showRepoIds.Values()
}
repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool {
return !showRepoIds.Contains(v)
})
var pagerCount int
if isShowClosed {
ctx.Data["State"] = "closed"
ctx.Data["Total"] = totalMilestoneStats.ClosedCount
pagerCount = int(milestoneStats.ClosedCount)
} else {
ctx.Data["State"] = "open"
ctx.Data["Total"] = totalMilestoneStats.OpenCount
pagerCount = int(milestoneStats.OpenCount)
}
ctx.Data["Milestones"] = milestones
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = counts
ctx.Data["MilestoneStats"] = milestoneStats
ctx.Data["SortType"] = sortType
ctx.Data["Keyword"] = keyword
ctx.Data["RepoIDs"] = repoIDs
ctx.Data["IsShowClosed"] = isShowClosed
pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "repos", "RepoIDs")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplMilestones)
} | identifier_body |
home.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package user
import (
"bytes"
"fmt"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
activities_model "code.gitea.io/gitea/models/activities"
asymkey_model "code.gitea.io/gitea/models/asymkey"
"code.gitea.io/gitea/models/db"
issues_model "code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/context"
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/routers/web/feed"
context_service "code.gitea.io/gitea/services/context"
issue_service "code.gitea.io/gitea/services/issue"
pull_service "code.gitea.io/gitea/services/pull"
"github.com/keybase/go-crypto/openpgp"
"github.com/keybase/go-crypto/openpgp/armor"
"xorm.io/builder"
)
const (
tplDashboard base.TplName = "user/dashboard/dashboard"
tplIssues base.TplName = "user/dashboard/issues"
tplMilestones base.TplName = "user/dashboard/milestones"
tplProfile base.TplName = "user/profile"
)
// getDashboardContextUser determines which context user's dashboard is being viewed.
func getDashboardContextUser(ctx *context.Context) *user_model.User {
ctxUser := ctx.Doer
orgName := ctx.Params(":org")
if len(orgName) > 0 {
ctxUser = ctx.Org.Organization.AsUser()
ctx.Data["Teams"] = ctx.Org.Teams
}
ctx.Data["ContextUser"] = ctxUser
orgs, err := organization.GetUserOrgsList(ctx.Doer)
if err != nil {
ctx.ServerError("GetUserOrgsList", err)
return nil
}
ctx.Data["Orgs"] = orgs
return ctxUser
}
// Dashboard renders the dashboard page
func Dashboard(ctx *context.Context) {
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
date = ctx.FormString("date")
page = ctx.FormInt("page")
)
// Make sure page number is at least 1. Will be posted to ctx.Data.
if page <= 1 {
page = 1
}
ctx.Data["Title"] = ctxUser.DisplayName() + " - " + ctx.Tr("dashboard")
ctx.Data["PageIsDashboard"] = true
ctx.Data["PageIsNews"] = true
cnt, _ := organization.GetOrganizationCount(ctx, ctxUser)
ctx.Data["UserOrgsCount"] = cnt
ctx.Data["MirrorsEnabled"] = setting.Mirror.Enabled
ctx.Data["Date"] = date
var uid int64
if ctxUser != nil {
uid = ctxUser.ID
}
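// Parameters consumed by the client-side repository list on the dashboard.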
ctx.PageData["dashboardRepoList"] = map[string]any{
"searchLimit": setting.UI.User.RepoPagingNum,
"uid": uid,
}
if setting.Service.EnableUserHeatmap {
data, err := activities_model.GetUserHeatmapDataByUserTeam(ctxUser, ctx.Org.Team, ctx.Doer)
if err != nil {
ctx.ServerError("GetUserHeatmapDataByUserTeam", err)
return
}
ctx.Data["HeatmapData"] = data
ctx.Data["HeatmapTotalContributions"] = activities_model.GetTotalContributionsInHeatmap(data)
}
feeds, count, err := activities_model.GetFeeds(ctx, activities_model.GetFeedsOptions{
RequestedUser: ctxUser,
RequestedTeam: ctx.Org.Team,
Actor: ctx.Doer,
IncludePrivate: true,
OnlyPerformedBy: false,
IncludeDeleted: false,
Date: ctx.FormString("date"),
ListOptions: db.ListOptions{
Page: page,
PageSize: setting.UI.FeedPagingNum,
},
})
if err != nil {
ctx.ServerError("GetFeeds", err)
return
}
ctx.Data["Feeds"] = feeds
pager := context.NewPagination(int(count), setting.UI.FeedPagingNum, page, 5)
pager.AddParam(ctx, "date", "Date")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplDashboard)
}
// Milestones renders the user milestones page
func Milestones(ctx *context.Context) {
if unit.TypeIssues.UnitGlobalDisabled() && unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Milestones overview page not available as both issues and pull requests are globally disabled")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("milestones")
ctx.Data["PageIsMilestonesDashboard"] = true
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
repoOpts := repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false, // Do not include all public repositories of users and public organisations
AllLimited: false, // Do not include all public repositories of limited organisations
Archived: util.OptionalBoolFalse,
HasMilestones: util.OptionalBoolTrue, // Only display repositories that have milestones
}
if ctxUser.IsOrganization() && ctx.Org.Team != nil {
repoOpts.TeamID = ctx.Org.Team.ID
}
var (
userRepoCond = repo_model.SearchRepositoryCondition(&repoOpts) // all repo condition user could visit
repoCond = userRepoCond
repoIDs []int64
reposQuery = ctx.FormString("repos")
isShowClosed = ctx.FormString("state") == "closed"
sortType = ctx.FormString("sort")
page = ctx.FormInt("page")
keyword = ctx.FormTrim("q")
)
if page <= 1 {
page = 1
}
if len(reposQuery) != 0 {
if issueReposQueryPattern.MatchString(reposQuery) {
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// parse each comma-separated ID and append it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
// If the repo id specified by query is not parseable or not accessible by user, just ignore it.
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
if len(repoIDs) > 0 {
// Don't just set repoCond = builder.In("id", repoIDs), because the user may not have permission on all of the requested repoIDs.
// Instead, narrow the user-visible repository condition down to the requested IDs.
repoCond = repoCond.And(builder.In("id", repoIDs))
}
} else {
log.Warn("issueReposQueryPattern not match with query")
}
}
counts, err := issues_model.CountMilestonesByRepoCondAndKw(userRepoCond, keyword, isShowClosed)
if err != nil {
ctx.ServerError("CountMilestonesByRepoIDs", err)
return
}
milestones, err := issues_model.SearchMilestones(repoCond, page, isShowClosed, sortType, keyword) | ctx.ServerError("SearchMilestones", err)
return
}
showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false)
if err != nil {
ctx.ServerError("SearchRepositoryByCondition", err)
return
}
sort.Sort(showRepos)
for i := 0; i < len(milestones); {
for _, repo := range showRepos {
if milestones[i].RepoID == repo.ID {
milestones[i].Repo = repo
break
}
}
if milestones[i].Repo == nil {
log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID)
milestones = append(milestones[:i], milestones[i+1:]...)
continue
}
milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{
URLPrefix: milestones[i].Repo.Link(),
Metas: milestones[i].Repo.ComposeMetas(),
Ctx: ctx,
}, milestones[i].Content)
if err != nil {
ctx.ServerError("RenderString", err)
return
}
if milestones[i].Repo.IsTimetrackerEnabled(ctx) {
err := milestones[i].LoadTotalTrackedTime()
if err != nil {
ctx.ServerError("LoadTotalTrackedTime", err)
return
}
}
i++
}
milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
var totalMilestoneStats *issues_model.MilestonesStats
if len(repoIDs) == 0 {
totalMilestoneStats = milestoneStats
} else {
totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
}
showRepoIds := make(container.Set[int64], len(showRepos))
for _, repo := range showRepos {
if repo.ID > 0 {
showRepoIds.Add(repo.ID)
}
}
if len(repoIDs) == 0 {
repoIDs = showRepoIds.Values()
}
repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool {
return !showRepoIds.Contains(v)
})
var pagerCount int
if isShowClosed {
ctx.Data["State"] = "closed"
ctx.Data["Total"] = totalMilestoneStats.ClosedCount
pagerCount = int(milestoneStats.ClosedCount)
} else {
ctx.Data["State"] = "open"
ctx.Data["Total"] = totalMilestoneStats.OpenCount
pagerCount = int(milestoneStats.OpenCount)
}
ctx.Data["Milestones"] = milestones
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = counts
ctx.Data["MilestoneStats"] = milestoneStats
ctx.Data["SortType"] = sortType
ctx.Data["Keyword"] = keyword
ctx.Data["RepoIDs"] = repoIDs
ctx.Data["IsShowClosed"] = isShowClosed
pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "repos", "RepoIDs")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplMilestones)
}
// Pulls renders the user's pull request overview page
func Pulls(ctx *context.Context) {
if unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Pull request overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("pull_requests")
ctx.Data["PageIsPulls"] = true
ctx.Data["SingleRepoAction"] = "pull"
buildIssueOverview(ctx, unit.TypePullRequests)
}
// Issues renders the user's issues overview page
func Issues(ctx *context.Context) {
if unit.TypeIssues.UnitGlobalDisabled() {
log.Debug("Issues overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("issues")
ctx.Data["PageIsIssues"] = true
ctx.Data["SingleRepoAction"] = "issue"
buildIssueOverview(ctx, unit.TypeIssues)
}
// Regexp for repos query
var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`)
func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
// ----------------------------------------------------
// Determine user; can be either user or organization.
// Return with NotFound or ServerError if unsuccessful.
// ----------------------------------------------------
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
viewType string
sortType = ctx.FormString("sort")
filterMode int
)
// Default to recently updated, unlike repository issues list
if sortType == "" {
sortType = "recentupdate"
}
// --------------------------------------------------------------------------------
// Distinguish User from Organization.
// Org:
// - Remember pre-determined viewType string for later. Will be posted to ctx.Data.
// Organization does not have view type and filter mode.
// User:
// - Use ctx.FormString("type") to determine filterMode.
// The type is set when clicking for example "assigned to me" on the overview page.
// - Remember either this or a fallback. Will be posted to ctx.Data.
// --------------------------------------------------------------------------------
// TODO: distinguish during routing
viewType = ctx.FormString("type")
switch viewType {
case "assigned":
filterMode = issues_model.FilterModeAssign
case "created_by":
filterMode = issues_model.FilterModeCreate
case "mentioned":
filterMode = issues_model.FilterModeMention
case "review_requested":
filterMode = issues_model.FilterModeReviewRequested
case "reviewed_by":
filterMode = issues_model.FilterModeReviewed
case "your_repositories":
fallthrough
default:
filterMode = issues_model.FilterModeYourRepositories
viewType = "your_repositories"
}
// --------------------------------------------------------------------------
// Build opts (IssuesOptions), which contains filter information.
// Will eventually be used to retrieve issues relevant for the overview page.
// Note: Non-final states of opts are used in-between, namely for:
// - Keyword search
// - Count Issues by repo
// --------------------------------------------------------------------------
// Get repository IDs where User/Org/Team has access.
var team *organization.Team
var org *organization.Organization
if ctx.Org != nil {
org = ctx.Org.Organization
team = ctx.Org.Team
}
isPullList := unitType == unit.TypePullRequests
opts := &issues_model.IssuesOptions{
IsPull: util.OptionalBoolOf(isPullList),
SortType: sortType,
IsArchived: util.OptionalBoolFalse,
Org: org,
Team: team,
User: ctx.Doer,
}
// Search all repositories which
//
// As user:
// - Owns the repository.
// - Has collaborator permissions in the repository.
//
// As org:
// - Owns the repository.
//
// As team:
// - The team's organization owns the repository.
// - Team has read permission to repository.
repoOpts := &repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false,
AllLimited: false,
Collaborate: util.OptionalBoolNone,
UnitType: unitType,
Archived: util.OptionalBoolFalse,
}
if team != nil {
repoOpts.TeamID = team.ID
}
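// Collect the repository IDs the doer can access; they scope the indexer queries and are later used to validate repo filters from the query string.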
accessibleRepos := container.Set[int64]{}
{
ids, _, err := repo_model.SearchRepositoryIDs(repoOpts)
if err != nil {
ctx.ServerError("SearchRepositoryIDs", err)
return
}
accessibleRepos.AddMultiple(ids...)
opts.RepoIDs = ids
if len(opts.RepoIDs) == 0 {
// no repos found, don't let the indexer return all repos
opts.RepoIDs = []int64{0}
}
}
switch filterMode {
case issues_model.FilterModeAll:
case issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
opts.AssigneeID = ctx.Doer.ID
case issues_model.FilterModeCreate:
opts.PosterID = ctx.Doer.ID
case issues_model.FilterModeMention:
opts.MentionedID = ctx.Doer.ID
case issues_model.FilterModeReviewRequested:
opts.ReviewRequestedID = ctx.Doer.ID
case issues_model.FilterModeReviewed:
opts.ReviewedID = ctx.Doer.ID
}
// keyword holds the search term entered into the search field.
keyword := strings.Trim(ctx.FormString("q"), " ")
ctx.Data["Keyword"] = keyword
// Educated guess: Do or don't show closed issues.
isShowClosed := ctx.FormString("state") == "closed"
opts.IsClosed = util.OptionalBoolOf(isShowClosed)
// Filter repos and count issues in them. Count will be used later.
// USING NON-FINAL STATE OF opts FOR A QUERY.
issueCountByRepo, err := issue_indexer.CountIssuesByRepo(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("CountIssuesByRepo", err)
return
}
// Make sure page number is at least 1. Will be posted to ctx.Data.
page := ctx.FormInt("page")
if page <= 1 {
page = 1
}
opts.Paginator = &db.ListOptions{
Page: page,
PageSize: setting.UI.IssuePagingNum,
}
// Get IDs for labels (a filter option for issues/pulls).
// Required for IssuesOptions.
var labelIDs []int64
selectedLabels := ctx.FormString("labels")
if len(selectedLabels) > 0 && selectedLabels != "0" {
var err error
labelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
if err != nil {
ctx.ServerError("StringsToInt64s", err)
return
}
}
opts.LabelIDs = labelIDs
// Parse ctx.FormString("repos") and remember matched repo IDs for later.
// Gets set when clicking filters on the issues overview page.
selectedRepoIDs := getRepoIDs(ctx.FormString("repos"))
// Remove repo IDs that are not accessible to the user.
selectedRepoIDs = util.SliceRemoveAllFunc(selectedRepoIDs, func(v int64) bool {
return !accessibleRepos.Contains(v)
})
if len(selectedRepoIDs) > 0 {
opts.RepoIDs = selectedRepoIDs
}
// ------------------------------
// Get issues as defined by opts.
// ------------------------------
// Slice of Issues that will be displayed on the overview page
// USING FINAL STATE OF opts FOR A QUERY.
var issues issues_model.IssueList
{
issueIDs, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("issueIDsFromSearch", err)
return
}
issues, err = issues_model.GetIssuesByIDs(ctx, issueIDs, true)
if err != nil {
ctx.ServerError("GetIssuesByIDs", err)
return
}
}
// ----------------------------------
// Add repository pointers to Issues.
// ----------------------------------
// Remove repositories that should not be shown,
// which are repositories that have no issues and are not selected by the user.
selectedRepos := container.SetOf(selectedRepoIDs...)
for k, v := range issueCountByRepo {
if v == 0 && !selectedRepos.Contains(k) {
delete(issueCountByRepo, k)
}
}
// showReposMap maps repository IDs to their Repository pointers.
showReposMap, err := loadRepoByIDs(ctxUser, issueCountByRepo, unitType)
if err != nil {
if repo_model.IsErrRepoNotExist(err) {
ctx.NotFound("GetRepositoryByID", err)
return
}
ctx.ServerError("loadRepoByIDs", err)
return
}
// a RepositoryList
showRepos := repo_model.RepositoryListOfMap(showReposMap)
sort.Sort(showRepos)
// maps pull request IDs to their CommitStatus. Will be posted to ctx.Data.
for _, issue := range issues {
if issue.Repo == nil {
issue.Repo = showReposMap[issue.RepoID]
}
}
commitStatuses, lastStatus, err := pull_service.GetIssuesAllCommitStatus(ctx, issues)
if err != nil {
ctx.ServerError("GetIssuesLastCommitStatus", err)
return
}
// -------------------------------
// Fill stats to post to ctx.Data.
// -------------------------------
issueStats, err := getUserIssueStats(ctx, filterMode, issue_indexer.ToSearchOptions(keyword, opts), ctx.Doer.ID)
if err != nil {
ctx.ServerError("getUserIssueStats", err)
return
}
// Will be posted to ctx.Data.
var shownIssues int
if !isShowClosed {
shownIssues = int(issueStats.OpenCount)
} else {
shownIssues = int(issueStats.ClosedCount)
}
if len(opts.RepoIDs) != 0 {
shownIssues = 0
for _, repoID := range opts.RepoIDs {
shownIssues += int(issueCountByRepo[repoID])
}
}
var allIssueCount int64
for _, issueCount := range issueCountByRepo {
allIssueCount += issueCount
}
ctx.Data["TotalIssueCount"] = allIssueCount
if len(opts.RepoIDs) == 1 {
repo := showReposMap[opts.RepoIDs[0]]
if repo != nil {
ctx.Data["SingleRepoLink"] = repo.Link()
}
}
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["IssueRefEndNames"], ctx.Data["IssueRefURLs"] = issue_service.GetRefEndNamesAndURLs(issues, ctx.FormString("RepoLink"))
if err := issues.LoadAttributes(ctx); err != nil {
ctx.ServerError("issues.LoadAttributes", err)
return
}
ctx.Data["Issues"] = issues
approvalCounts, err := issues.GetApprovalCounts(ctx)
if err != nil {
ctx.ServerError("ApprovalCounts", err)
return
}
ctx.Data["ApprovalCounts"] = func(issueID int64, typ string) int64 {
counts, ok := approvalCounts[issueID]
if !ok || len(counts) == 0 {
return 0
}
reviewTyp := issues_model.ReviewTypeApprove
if typ == "reject" {
reviewTyp = issues_model.ReviewTypeReject
} else if typ == "waiting" {
reviewTyp = issues_model.ReviewTypeRequest
}
for _, count := range counts {
if count.Type == reviewTyp {
return count.Count
}
}
return 0
}
ctx.Data["CommitLastStatus"] = lastStatus
ctx.Data["CommitStatuses"] = commitStatuses
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = issueCountByRepo
ctx.Data["IssueStats"] = issueStats
ctx.Data["ViewType"] = viewType
ctx.Data["SortType"] = sortType
ctx.Data["RepoIDs"] = selectedRepoIDs
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["SelectLabels"] = selectedLabels
if isShowClosed {
ctx.Data["State"] = "closed"
} else {
ctx.Data["State"] = "open"
}
// Convert []int64 to string
reposParam, _ := json.Marshal(opts.RepoIDs)
ctx.Data["ReposParam"] = string(reposParam)
pager := context.NewPagination(shownIssues, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "type", "ViewType")
pager.AddParam(ctx, "repos", "ReposParam")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
pager.AddParam(ctx, "labels", "SelectLabels")
pager.AddParam(ctx, "milestone", "MilestoneID")
pager.AddParam(ctx, "assignee", "AssigneeID")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplIssues)
}
func getRepoIDs(reposQuery string) []int64 {
if len(reposQuery) == 0 || reposQuery == "[]" {
return []int64{}
}
if !issueReposQueryPattern.MatchString(reposQuery) {
log.Warn("issueReposQueryPattern does not match query: %q", reposQuery)
return []int64{}
}
var repoIDs []int64
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// parse each comma-separated ID and append it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
return repoIDs
}
func loadRepoByIDs(ctxUser *user_model.User, issueCountByRepo map[int64]int64, unitType unit.Type) (map[int64]*repo_model.Repository, error) {
totalRes := make(map[int64]*repo_model.Repository, len(issueCountByRepo))
repoIDs := make([]int64, 0, 500)
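// Resolve repositories in batches of up to 500 IDs to keep each lookup bounded.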
for id := range issueCountByRepo {
if id <= 0 {
continue
}
repoIDs = append(repoIDs, id)
if len(repoIDs) == 500 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
repoIDs = repoIDs[:0]
}
}
if len(repoIDs) > 0 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
}
return totalRes, nil
}
// ShowSSHKeys outputs all the SSH keys of the user identified by uid
func ShowSSHKeys(ctx *context.Context) {
keys, err := asymkey_model.ListPublicKeys(ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListPublicKeys", err)
return
}
var buf bytes.Buffer
for i := range keys {
buf.WriteString(keys[i].OmitEmail())
buf.WriteString("\n")
}
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
// ShowGPGKeys outputs all the public GPG keys of the user identified by uid
func ShowGPGKeys(ctx *context.Context) {
keys, err := asymkey_model.ListGPGKeys(ctx, ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListGPGKeys", err)
return
}
entities := make([]*openpgp.Entity, 0)
failedEntitiesID := make([]string, 0)
for _, k := range keys {
e, err := asymkey_model.GPGKeyToEntity(k)
if err != nil {
if asymkey_model.IsErrGPGKeyImportNotExist(err) {
failedEntitiesID = append(failedEntitiesID, k.KeyID)
continue // Skip keys from previous imports that have no backup of the imported armored key
}
ctx.ServerError("ShowGPGKeys", err)
return
}
entities = append(entities, e)
}
var buf bytes.Buffer
headers := make(map[string]string)
if len(failedEntitiesID) > 0 { // If some keys need to be re-imported before they can be exported
headers["Note"] = fmt.Sprintf("The keys with the following IDs couldn't be exported and need to be reuploaded: %s", strings.Join(failedEntitiesID, ", "))
} else if len(entities) == 0 {
headers["Note"] = "This user hasn't uploaded any GPG keys."
}
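// Serialize every exportable key into a single armored PGP public key block; the notes above are emitted as armor headers.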
writer, _ := armor.Encode(&buf, "PGP PUBLIC KEY BLOCK", headers)
for _, e := range entities {
err = e.Serialize(writer) // TODO: find out why keys are exported with a different cipherTypeByte than the original (should not be blocking, but strange)
if err != nil {
ctx.ServerError("ShowGPGKeys", err)
return
}
}
writer.Close()
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
func UsernameSubRoute(ctx *context.Context) {
// WORKAROUND to support usernames with "." in them
// https://github.com/go-chi/chi/issues/781
username := ctx.Params("username")
reloadParam := func(suffix string) (success bool) {
ctx.SetParams("username", strings.TrimSuffix(username, suffix))
context_service.UserAssignmentWeb()(ctx)
return !ctx.Written()
}
switch {
case strings.HasSuffix(username, ".png"):
if reloadParam(".png") {
AvatarByUserName(ctx)
}
case strings.HasSuffix(username, ".keys"):
if reloadParam(".keys") {
ShowSSHKeys(ctx)
}
case strings.HasSuffix(username, ".gpg"):
if reloadParam(".gpg") {
ShowGPGKeys(ctx)
}
case strings.HasSuffix(username, ".rss"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".rss") {
context_service.UserAssignmentWeb()(ctx)
feed.ShowUserFeedRSS(ctx)
}
case strings.HasSuffix(username, ".atom"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".atom") {
feed.ShowUserFeedAtom(ctx)
}
default:
context_service.UserAssignmentWeb()(ctx)
if !ctx.Written() {
ctx.Data["EnableFeed"] = setting.Other.EnableFeed
OwnerProfile(ctx)
}
}
}
func getUserIssueStats(ctx *context.Context, filterMode int, opts *issue_indexer.SearchOptions, doerID int64) (*issues_model.IssueStats, error) {
opts = opts.Copy(func(o *issue_indexer.SearchOptions) {
o.AssigneeID = nil
o.PosterID = nil
o.MentionID = nil
o.ReviewRequestedID = nil
o.ReviewedID = nil
})
var (
err error
ret = &issues_model.IssueStats{}
)
{
openClosedOpts := opts.Copy()
switch filterMode {
case issues_model.FilterModeAll, issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
openClosedOpts.AssigneeID = &doerID
case issues_model.FilterModeCreate:
openClosedOpts.PosterID = &doerID
case issues_model.FilterModeMention:
openClosedOpts.MentionID = &doerID
case issues_model.FilterModeReviewRequested:
openClosedOpts.ReviewRequestedID = &doerID
case issues_model.FilterModeReviewed:
openClosedOpts.ReviewedID = &doerID
}
openClosedOpts.IsClosed = util.OptionalBoolFalse
ret.OpenCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
openClosedOpts.IsClosed = util.OptionalBoolTrue
ret.ClosedCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
}
ret.YourRepositoriesCount, err = issue_indexer.CountIssues(ctx, opts)
if err != nil {
return nil, err
}
ret.AssignCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AssigneeID = &doerID }))
if err != nil {
return nil, err
}
ret.CreateCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.PosterID = &doerID }))
if err != nil {
return nil, err
}
ret.MentionCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.MentionID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewRequestedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewRequestedID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewedID = &doerID }))
if err != nil {
return nil, err
}
return ret, nil
} | if err != nil { | random_line_split |
home.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package user
import (
"bytes"
"fmt"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
activities_model "code.gitea.io/gitea/models/activities"
asymkey_model "code.gitea.io/gitea/models/asymkey"
"code.gitea.io/gitea/models/db"
issues_model "code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/context"
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/routers/web/feed"
context_service "code.gitea.io/gitea/services/context"
issue_service "code.gitea.io/gitea/services/issue"
pull_service "code.gitea.io/gitea/services/pull"
"github.com/keybase/go-crypto/openpgp"
"github.com/keybase/go-crypto/openpgp/armor"
"xorm.io/builder"
)
const (
tplDashboard base.TplName = "user/dashboard/dashboard"
tplIssues base.TplName = "user/dashboard/issues"
tplMilestones base.TplName = "user/dashboard/milestones"
tplProfile base.TplName = "user/profile"
)
// getDashboardContextUser determines which context user's dashboard is being viewed.
func getDashboardContextUser(ctx *context.Context) *user_model.User {
ctxUser := ctx.Doer
orgName := ctx.Params(":org")
if len(orgName) > 0 {
ctxUser = ctx.Org.Organization.AsUser()
ctx.Data["Teams"] = ctx.Org.Teams
}
ctx.Data["ContextUser"] = ctxUser
orgs, err := organization.GetUserOrgsList(ctx.Doer)
if err != nil {
ctx.ServerError("GetUserOrgsList", err)
return nil
}
ctx.Data["Orgs"] = orgs
return ctxUser
}
// Dashboard renders the dashboard page
func Dashboard(ctx *context.Context) {
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
date = ctx.FormString("date")
page = ctx.FormInt("page")
)
// Make sure page number is at least 1. Will be posted to ctx.Data.
if page <= 1 {
page = 1
}
ctx.Data["Title"] = ctxUser.DisplayName() + " - " + ctx.Tr("dashboard")
ctx.Data["PageIsDashboard"] = true
ctx.Data["PageIsNews"] = true
cnt, _ := organization.GetOrganizationCount(ctx, ctxUser)
ctx.Data["UserOrgsCount"] = cnt
ctx.Data["MirrorsEnabled"] = setting.Mirror.Enabled
ctx.Data["Date"] = date
var uid int64
if ctxUser != nil {
uid = ctxUser.ID
}
ctx.PageData["dashboardRepoList"] = map[string]any{
"searchLimit": setting.UI.User.RepoPagingNum,
"uid": uid,
}
if setting.Service.EnableUserHeatmap {
data, err := activities_model.GetUserHeatmapDataByUserTeam(ctxUser, ctx.Org.Team, ctx.Doer)
if err != nil {
ctx.ServerError("GetUserHeatmapDataByUserTeam", err)
return
}
ctx.Data["HeatmapData"] = data
ctx.Data["HeatmapTotalContributions"] = activities_model.GetTotalContributionsInHeatmap(data)
}
feeds, count, err := activities_model.GetFeeds(ctx, activities_model.GetFeedsOptions{
RequestedUser: ctxUser,
RequestedTeam: ctx.Org.Team,
Actor: ctx.Doer,
IncludePrivate: true,
OnlyPerformedBy: false,
IncludeDeleted: false,
Date: ctx.FormString("date"),
ListOptions: db.ListOptions{
Page: page,
PageSize: setting.UI.FeedPagingNum,
},
})
if err != nil {
ctx.ServerError("GetFeeds", err)
return
}
ctx.Data["Feeds"] = feeds
pager := context.NewPagination(int(count), setting.UI.FeedPagingNum, page, 5)
pager.AddParam(ctx, "date", "Date")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplDashboard)
}
// Milestones renders the user milestones page
func Milestones(ctx *context.Context) {
if unit.TypeIssues.UnitGlobalDisabled() && unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Milestones overview page not available as both issues and pull requests are globally disabled")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("milestones")
ctx.Data["PageIsMilestonesDashboard"] = true
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
repoOpts := repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false, // Do not include all public repositories of users and public organisations
AllLimited: false, // Do not include all public repositories of limited organisations
Archived: util.OptionalBoolFalse,
HasMilestones: util.OptionalBoolTrue, // Only display repositories that have milestones
}
if ctxUser.IsOrganization() && ctx.Org.Team != nil {
repoOpts.TeamID = ctx.Org.Team.ID
}
var (
userRepoCond = repo_model.SearchRepositoryCondition(&repoOpts) // all repo condition user could visit
repoCond = userRepoCond
repoIDs []int64
reposQuery = ctx.FormString("repos")
isShowClosed = ctx.FormString("state") == "closed"
sortType = ctx.FormString("sort")
page = ctx.FormInt("page")
keyword = ctx.FormTrim("q")
)
if page <= 1 {
page = 1
}
if len(reposQuery) != 0 {
if issueReposQueryPattern.MatchString(reposQuery) {
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// parse each comma-separated ID and append it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
// If the repo id specified by query is not parseable or not accessible by user, just ignore it.
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
if len(repoIDs) > 0 {
// Don't just set repoCond = builder.In("id", repoIDs), because the user may not have permission on all of the requested repoIDs.
// Instead, narrow the user-visible repository condition down to the requested IDs.
repoCond = repoCond.And(builder.In("id", repoIDs))
}
} else {
log.Warn("issueReposQueryPattern not match with query")
}
}
counts, err := issues_model.CountMilestonesByRepoCondAndKw(userRepoCond, keyword, isShowClosed)
if err != nil {
ctx.ServerError("CountMilestonesByRepoIDs", err)
return
}
milestones, err := issues_model.SearchMilestones(repoCond, page, isShowClosed, sortType, keyword)
if err != nil {
ctx.ServerError("SearchMilestones", err)
return
}
showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false)
if err != nil {
ctx.ServerError("SearchRepositoryByCondition", err)
return
}
sort.Sort(showRepos)
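// Attach its repository to each milestone; milestones whose repository is not in the visible set are dropped.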
for i := 0; i < len(milestones); {
for _, repo := range showRepos {
if milestones[i].RepoID == repo.ID {
milestones[i].Repo = repo
break
}
}
if milestones[i].Repo == nil {
log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID)
milestones = append(milestones[:i], milestones[i+1:]...)
continue
}
milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{
URLPrefix: milestones[i].Repo.Link(),
Metas: milestones[i].Repo.ComposeMetas(),
Ctx: ctx,
}, milestones[i].Content)
if err != nil {
ctx.ServerError("RenderString", err)
return
}
if milestones[i].Repo.IsTimetrackerEnabled(ctx) {
err := milestones[i].LoadTotalTrackedTime()
if err != nil {
ctx.ServerError("LoadTotalTrackedTime", err)
return
}
}
i++
}
milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
var totalMilestoneStats *issues_model.MilestonesStats
if len(repoIDs) == 0 {
totalMilestoneStats = milestoneStats
} else {
totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
}
showRepoIds := make(container.Set[int64], len(showRepos))
for _, repo := range showRepos {
if repo.ID > 0 {
showRepoIds.Add(repo.ID)
}
}
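// Default to all visible repositories, then drop any requested IDs the user cannot see.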
if len(repoIDs) == 0 {
repoIDs = showRepoIds.Values()
}
repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool {
return !showRepoIds.Contains(v)
})
var pagerCount int
if isShowClosed {
ctx.Data["State"] = "closed"
ctx.Data["Total"] = totalMilestoneStats.ClosedCount
pagerCount = int(milestoneStats.ClosedCount)
} else {
ctx.Data["State"] = "open"
ctx.Data["Total"] = totalMilestoneStats.OpenCount
pagerCount = int(milestoneStats.OpenCount)
}
ctx.Data["Milestones"] = milestones
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = counts
ctx.Data["MilestoneStats"] = milestoneStats
ctx.Data["SortType"] = sortType
ctx.Data["Keyword"] = keyword
ctx.Data["RepoIDs"] = repoIDs
ctx.Data["IsShowClosed"] = isShowClosed
pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "repos", "RepoIDs")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplMilestones)
}
// Pulls renders the user's pull request overview page
func Pulls(ctx *context.Context) {
if unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Pull request overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("pull_requests")
ctx.Data["PageIsPulls"] = true
ctx.Data["SingleRepoAction"] = "pull"
buildIssueOverview(ctx, unit.TypePullRequests)
}
// Issues renders the user's issues overview page
func Issues(ctx *context.Context) {
if unit.TypeIssues.UnitGlobalDisabled() {
log.Debug("Issues overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("issues")
ctx.Data["PageIsIssues"] = true
ctx.Data["SingleRepoAction"] = "issue"
buildIssueOverview(ctx, unit.TypeIssues)
}
// Regexp for repos query
var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`)
func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
// ----------------------------------------------------
// Determine user; can be either user or organization.
// Return with NotFound or ServerError if unsuccessful.
// ----------------------------------------------------
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
viewType string
sortType = ctx.FormString("sort")
filterMode int
)
// Default to recently updated, unlike repository issues list
if sortType == "" {
sortType = "recentupdate"
}
// --------------------------------------------------------------------------------
// Distinguish User from Organization.
// Org:
// - Remember pre-determined viewType string for later. Will be posted to ctx.Data.
// Organization does not have view type and filter mode.
// User:
// - Use ctx.FormString("type") to determine filterMode.
// The type is set when clicking for example "assigned to me" on the overview page.
// - Remember either this or a fallback. Will be posted to ctx.Data.
// --------------------------------------------------------------------------------
// TODO: distinguish during routing
viewType = ctx.FormString("type")
switch viewType {
case "assigned":
filterMode = issues_model.FilterModeAssign
case "created_by":
filterMode = issues_model.FilterModeCreate
case "mentioned":
filterMode = issues_model.FilterModeMention
case "review_requested":
filterMode = issues_model.FilterModeReviewRequested
case "reviewed_by":
filterMode = issues_model.FilterModeReviewed
case "your_repositories":
fallthrough
default:
filterMode = issues_model.FilterModeYourRepositories
viewType = "your_repositories"
}
// --------------------------------------------------------------------------
// Build opts (IssuesOptions), which contains filter information.
// Will eventually be used to retrieve issues relevant for the overview page.
// Note: Non-final states of opts are used in-between, namely for:
// - Keyword search
// - Count Issues by repo
// --------------------------------------------------------------------------
// Get repository IDs where User/Org/Team has access.
var team *organization.Team
var org *organization.Organization
if ctx.Org != nil {
org = ctx.Org.Organization
team = ctx.Org.Team
}
isPullList := unitType == unit.TypePullRequests
opts := &issues_model.IssuesOptions{
IsPull: util.OptionalBoolOf(isPullList),
SortType: sortType,
IsArchived: util.OptionalBoolFalse,
Org: org,
Team: team,
User: ctx.Doer,
}
// Search all repositories which
//
// As user:
// - Owns the repository.
// - Has collaborator permissions in the repository.
//
// As org:
// - Owns the repository.
//
// As team:
// - The team's organization owns the repository.
// - Team has read permission to repository.
repoOpts := &repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false,
AllLimited: false,
Collaborate: util.OptionalBoolNone,
UnitType: unitType,
Archived: util.OptionalBoolFalse,
}
if team != nil {
repoOpts.TeamID = team.ID
}
accessibleRepos := container.Set[int64]{}
{
ids, _, err := repo_model.SearchRepositoryIDs(repoOpts)
if err != nil {
ctx.ServerError("SearchRepositoryIDs", err)
return
}
accessibleRepos.AddMultiple(ids...)
opts.RepoIDs = ids
if len(opts.RepoIDs) == 0 {
// no repos found, don't let the indexer return all repos
opts.RepoIDs = []int64{0}
}
}
switch filterMode {
case issues_model.FilterModeAll:
case issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
opts.AssigneeID = ctx.Doer.ID
case issues_model.FilterModeCreate:
opts.PosterID = ctx.Doer.ID
case issues_model.FilterModeMention:
opts.MentionedID = ctx.Doer.ID
case issues_model.FilterModeReviewRequested:
opts.ReviewRequestedID = ctx.Doer.ID
case issues_model.FilterModeReviewed:
opts.ReviewedID = ctx.Doer.ID
}
// keyword holds the search term entered into the search field.
keyword := strings.Trim(ctx.FormString("q"), " ")
ctx.Data["Keyword"] = keyword
// Educated guess: Do or don't show closed issues.
isShowClosed := ctx.FormString("state") == "closed"
opts.IsClosed = util.OptionalBoolOf(isShowClosed)
// Filter repos and count issues in them. Count will be used later.
// USING NON-FINAL STATE OF opts FOR A QUERY.
issueCountByRepo, err := issue_indexer.CountIssuesByRepo(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("CountIssuesByRepo", err)
return
}
// Make sure page number is at least 1. Will be posted to ctx.Data.
page := ctx.FormInt("page")
if page <= 1 {
page = 1
}
opts.Paginator = &db.ListOptions{
Page: page,
PageSize: setting.UI.IssuePagingNum,
}
// Get IDs for labels (a filter option for issues/pulls).
// Required for IssuesOptions.
var labelIDs []int64
selectedLabels := ctx.FormString("labels")
if len(selectedLabels) > 0 && selectedLabels != "0" {
var err error
labelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
if err != nil |
}
opts.LabelIDs = labelIDs
// Parse ctx.FormString("repos") and remember matched repo IDs for later.
// Gets set when clicking filters on the issues overview page.
selectedRepoIDs := getRepoIDs(ctx.FormString("repos"))
// Remove repo IDs that are not accessible to the user.
selectedRepoIDs = util.SliceRemoveAllFunc(selectedRepoIDs, func(v int64) bool {
return !accessibleRepos.Contains(v)
})
if len(selectedRepoIDs) > 0 {
opts.RepoIDs = selectedRepoIDs
}
// ------------------------------
// Get issues as defined by opts.
// ------------------------------
// Slice of Issues that will be displayed on the overview page
// USING FINAL STATE OF opts FOR A QUERY.
var issues issues_model.IssueList
{
issueIDs, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("issueIDsFromSearch", err)
return
}
issues, err = issues_model.GetIssuesByIDs(ctx, issueIDs, true)
if err != nil {
ctx.ServerError("GetIssuesByIDs", err)
return
}
}
// ----------------------------------
// Add repository pointers to Issues.
// ----------------------------------
// Remove repositories that should not be shown,
// which are repositories that have no issues and are not selected by the user.
selectedRepos := container.SetOf(selectedRepoIDs...)
for k, v := range issueCountByRepo {
if v == 0 && !selectedRepos.Contains(k) {
delete(issueCountByRepo, k)
}
}
// showReposMap maps repository IDs to their Repository pointers.
showReposMap, err := loadRepoByIDs(ctxUser, issueCountByRepo, unitType)
if err != nil {
if repo_model.IsErrRepoNotExist(err) {
ctx.NotFound("GetRepositoryByID", err)
return
}
ctx.ServerError("loadRepoByIDs", err)
return
}
// a RepositoryList
showRepos := repo_model.RepositoryListOfMap(showReposMap)
sort.Sort(showRepos)
// maps pull request IDs to their CommitStatus. Will be posted to ctx.Data.
for _, issue := range issues {
if issue.Repo == nil {
issue.Repo = showReposMap[issue.RepoID]
}
}
commitStatuses, lastStatus, err := pull_service.GetIssuesAllCommitStatus(ctx, issues)
if err != nil {
ctx.ServerError("GetIssuesLastCommitStatus", err)
return
}
// -------------------------------
// Fill stats to post to ctx.Data.
// -------------------------------
issueStats, err := getUserIssueStats(ctx, filterMode, issue_indexer.ToSearchOptions(keyword, opts), ctx.Doer.ID)
if err != nil {
ctx.ServerError("getUserIssueStats", err)
return
}
// Will be posted to ctx.Data.
var shownIssues int
if !isShowClosed {
shownIssues = int(issueStats.OpenCount)
} else {
shownIssues = int(issueStats.ClosedCount)
}
if len(opts.RepoIDs) != 0 {
shownIssues = 0
for _, repoID := range opts.RepoIDs {
shownIssues += int(issueCountByRepo[repoID])
}
}
var allIssueCount int64
for _, issueCount := range issueCountByRepo {
allIssueCount += issueCount
}
ctx.Data["TotalIssueCount"] = allIssueCount
if len(opts.RepoIDs) == 1 {
repo := showReposMap[opts.RepoIDs[0]]
if repo != nil {
ctx.Data["SingleRepoLink"] = repo.Link()
}
}
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["IssueRefEndNames"], ctx.Data["IssueRefURLs"] = issue_service.GetRefEndNamesAndURLs(issues, ctx.FormString("RepoLink"))
if err := issues.LoadAttributes(ctx); err != nil {
ctx.ServerError("issues.LoadAttributes", err)
return
}
ctx.Data["Issues"] = issues
approvalCounts, err := issues.GetApprovalCounts(ctx)
if err != nil {
ctx.ServerError("ApprovalCounts", err)
return
}
ctx.Data["ApprovalCounts"] = func(issueID int64, typ string) int64 {
counts, ok := approvalCounts[issueID]
if !ok || len(counts) == 0 {
return 0
}
reviewTyp := issues_model.ReviewTypeApprove
if typ == "reject" {
reviewTyp = issues_model.ReviewTypeReject
} else if typ == "waiting" {
reviewTyp = issues_model.ReviewTypeRequest
}
for _, count := range counts {
if count.Type == reviewTyp {
return count.Count
}
}
return 0
}
ctx.Data["CommitLastStatus"] = lastStatus
ctx.Data["CommitStatuses"] = commitStatuses
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = issueCountByRepo
ctx.Data["IssueStats"] = issueStats
ctx.Data["ViewType"] = viewType
ctx.Data["SortType"] = sortType
ctx.Data["RepoIDs"] = selectedRepoIDs
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["SelectLabels"] = selectedLabels
if isShowClosed {
ctx.Data["State"] = "closed"
} else {
ctx.Data["State"] = "open"
}
// Convert []int64 to string
reposParam, _ := json.Marshal(opts.RepoIDs)
ctx.Data["ReposParam"] = string(reposParam)
pager := context.NewPagination(shownIssues, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "type", "ViewType")
pager.AddParam(ctx, "repos", "ReposParam")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
pager.AddParam(ctx, "labels", "SelectLabels")
pager.AddParam(ctx, "milestone", "MilestoneID")
pager.AddParam(ctx, "assignee", "AssigneeID")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplIssues)
}
func getRepoIDs(reposQuery string) []int64 {
if len(reposQuery) == 0 || reposQuery == "[]" {
return []int64{}
}
if !issueReposQueryPattern.MatchString(reposQuery) {
log.Warn("issueReposQueryPattern does not match query: %q", reposQuery)
return []int64{}
}
var repoIDs []int64
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// parse each comma-separated ID and append it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
return repoIDs
}
func loadRepoByIDs(ctxUser *user_model.User, issueCountByRepo map[int64]int64, unitType unit.Type) (map[int64]*repo_model.Repository, error) {
totalRes := make(map[int64]*repo_model.Repository, len(issueCountByRepo))
repoIDs := make([]int64, 0, 500)
for id := range issueCountByRepo {
if id <= 0 {
continue
}
repoIDs = append(repoIDs, id)
if len(repoIDs) == 500 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
repoIDs = repoIDs[:0]
}
}
if len(repoIDs) > 0 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
}
return totalRes, nil
}
// ShowSSHKeys outputs all the SSH keys of the user identified by uid
func ShowSSHKeys(ctx *context.Context) {
keys, err := asymkey_model.ListPublicKeys(ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListPublicKeys", err)
return
}
var buf bytes.Buffer
for i := range keys {
buf.WriteString(keys[i].OmitEmail())
buf.WriteString("\n")
}
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
// ShowGPGKeys outputs all the public GPG keys of the user identified by uid
func ShowGPGKeys(ctx *context.Context) {
keys, err := asymkey_model.ListGPGKeys(ctx, ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListGPGKeys", err)
return
}
entities := make([]*openpgp.Entity, 0)
failedEntitiesID := make([]string, 0)
for _, k := range keys {
e, err := asymkey_model.GPGKeyToEntity(k)
if err != nil {
if asymkey_model.IsErrGPGKeyImportNotExist(err) {
failedEntitiesID = append(failedEntitiesID, k.KeyID)
continue // Skip keys from previous imports that have no backup of the imported armored key
}
ctx.ServerError("ShowGPGKeys", err)
return
}
entities = append(entities, e)
}
var buf bytes.Buffer
headers := make(map[string]string)
if len(failedEntitiesID) > 0 { // If some keys need to be re-imported before they can be exported
headers["Note"] = fmt.Sprintf("The keys with the following IDs couldn't be exported and need to be reuploaded: %s", strings.Join(failedEntitiesID, ", "))
} else if len(entities) == 0 {
headers["Note"] = "This user hasn't uploaded any GPG keys."
}
writer, _ := armor.Encode(&buf, "PGP PUBLIC KEY BLOCK", headers)
for _, e := range entities {
err = e.Serialize(writer) // TODO: find out why keys are exported with a different cipherTypeByte than the original (should not be blocking, but strange)
if err != nil {
ctx.ServerError("ShowGPGKeys", err)
return
}
}
writer.Close()
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
func UsernameSubRoute(ctx *context.Context) {
// WORKAROUND to support usernames with "." in them
// https://github.com/go-chi/chi/issues/781
username := ctx.Params("username")
reloadParam := func(suffix string) (success bool) {
ctx.SetParams("username", strings.TrimSuffix(username, suffix))
context_service.UserAssignmentWeb()(ctx)
return !ctx.Written()
}
switch {
case strings.HasSuffix(username, ".png"):
if reloadParam(".png") {
AvatarByUserName(ctx)
}
case strings.HasSuffix(username, ".keys"):
if reloadParam(".keys") {
ShowSSHKeys(ctx)
}
case strings.HasSuffix(username, ".gpg"):
if reloadParam(".gpg") {
ShowGPGKeys(ctx)
}
case strings.HasSuffix(username, ".rss"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".rss") {
context_service.UserAssignmentWeb()(ctx)
feed.ShowUserFeedRSS(ctx)
}
case strings.HasSuffix(username, ".atom"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".atom") {
feed.ShowUserFeedAtom(ctx)
}
default:
context_service.UserAssignmentWeb()(ctx)
if !ctx.Written() {
ctx.Data["EnableFeed"] = setting.Other.EnableFeed
OwnerProfile(ctx)
}
}
}
func getUserIssueStats(ctx *context.Context, filterMode int, opts *issue_indexer.SearchOptions, doerID int64) (*issues_model.IssueStats, error) {
opts = opts.Copy(func(o *issue_indexer.SearchOptions) {
o.AssigneeID = nil
o.PosterID = nil
o.MentionID = nil
o.ReviewRequestedID = nil
o.ReviewedID = nil
})
var (
err error
ret = &issues_model.IssueStats{}
)
{
openClosedOpts := opts.Copy()
switch filterMode {
case issues_model.FilterModeAll, issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
openClosedOpts.AssigneeID = &doerID
case issues_model.FilterModeCreate:
openClosedOpts.PosterID = &doerID
case issues_model.FilterModeMention:
openClosedOpts.MentionID = &doerID
case issues_model.FilterModeReviewRequested:
openClosedOpts.ReviewRequestedID = &doerID
case issues_model.FilterModeReviewed:
openClosedOpts.ReviewedID = &doerID
}
openClosedOpts.IsClosed = util.OptionalBoolFalse
ret.OpenCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
openClosedOpts.IsClosed = util.OptionalBoolTrue
ret.ClosedCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
}
ret.YourRepositoriesCount, err = issue_indexer.CountIssues(ctx, opts)
if err != nil {
return nil, err
}
ret.AssignCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AssigneeID = &doerID }))
if err != nil {
return nil, err
}
ret.CreateCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.PosterID = &doerID }))
if err != nil {
return nil, err
}
ret.MentionCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.MentionID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewRequestedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewRequestedID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewedID = &doerID }))
if err != nil {
return nil, err
}
return ret, nil
}
| {
ctx.ServerError("StringsToInt64s", err)
return
} | conditional_block |
home.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package user
import (
"bytes"
"fmt"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
activities_model "code.gitea.io/gitea/models/activities"
asymkey_model "code.gitea.io/gitea/models/asymkey"
"code.gitea.io/gitea/models/db"
issues_model "code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/context"
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
"code.gitea.io/gitea/modules/markup/markdown"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/routers/web/feed"
context_service "code.gitea.io/gitea/services/context"
issue_service "code.gitea.io/gitea/services/issue"
pull_service "code.gitea.io/gitea/services/pull"
"github.com/keybase/go-crypto/openpgp"
"github.com/keybase/go-crypto/openpgp/armor"
"xorm.io/builder"
)
const (
tplDashboard base.TplName = "user/dashboard/dashboard"
tplIssues base.TplName = "user/dashboard/issues"
tplMilestones base.TplName = "user/dashboard/milestones"
tplProfile base.TplName = "user/profile"
)
// getDashboardContextUser determines the context user whose dashboard is being viewed.
func getDashboardContextUser(ctx *context.Context) *user_model.User {
ctxUser := ctx.Doer
orgName := ctx.Params(":org")
if len(orgName) > 0 {
ctxUser = ctx.Org.Organization.AsUser()
ctx.Data["Teams"] = ctx.Org.Teams
}
ctx.Data["ContextUser"] = ctxUser
orgs, err := organization.GetUserOrgsList(ctx.Doer)
if err != nil {
ctx.ServerError("GetUserOrgsList", err)
return nil
}
ctx.Data["Orgs"] = orgs
return ctxUser
}
// Dashboard render the dashboard page
func Dashboard(ctx *context.Context) {
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
date = ctx.FormString("date")
page = ctx.FormInt("page")
)
// Make sure page number is at least 1. Will be posted to ctx.Data.
if page <= 1 {
page = 1
}
ctx.Data["Title"] = ctxUser.DisplayName() + " - " + ctx.Tr("dashboard")
ctx.Data["PageIsDashboard"] = true
ctx.Data["PageIsNews"] = true
cnt, _ := organization.GetOrganizationCount(ctx, ctxUser)
ctx.Data["UserOrgsCount"] = cnt
ctx.Data["MirrorsEnabled"] = setting.Mirror.Enabled
ctx.Data["Date"] = date
var uid int64
if ctxUser != nil {
uid = ctxUser.ID
}
ctx.PageData["dashboardRepoList"] = map[string]any{
"searchLimit": setting.UI.User.RepoPagingNum,
"uid": uid,
}
if setting.Service.EnableUserHeatmap {
data, err := activities_model.GetUserHeatmapDataByUserTeam(ctxUser, ctx.Org.Team, ctx.Doer)
if err != nil {
ctx.ServerError("GetUserHeatmapDataByUserTeam", err)
return
}
ctx.Data["HeatmapData"] = data
ctx.Data["HeatmapTotalContributions"] = activities_model.GetTotalContributionsInHeatmap(data)
}
feeds, count, err := activities_model.GetFeeds(ctx, activities_model.GetFeedsOptions{
RequestedUser: ctxUser,
RequestedTeam: ctx.Org.Team,
Actor: ctx.Doer,
IncludePrivate: true,
OnlyPerformedBy: false,
IncludeDeleted: false,
Date: ctx.FormString("date"),
ListOptions: db.ListOptions{
Page: page,
PageSize: setting.UI.FeedPagingNum,
},
})
if err != nil {
ctx.ServerError("GetFeeds", err)
return
}
ctx.Data["Feeds"] = feeds
pager := context.NewPagination(int(count), setting.UI.FeedPagingNum, page, 5)
pager.AddParam(ctx, "date", "Date")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplDashboard)
}
// Milestones render the user milestones page
func Milestones(ctx *context.Context) {
if unit.TypeIssues.UnitGlobalDisabled() && unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Milestones overview page not available as both issues and pull requests are globally disabled")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("milestones")
ctx.Data["PageIsMilestonesDashboard"] = true
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
repoOpts := repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false, // Include also all public repositories of users and public organisations
AllLimited: false, // Include also all public repositories of limited organisations
Archived: util.OptionalBoolFalse,
HasMilestones: util.OptionalBoolTrue, // Just needs display repos has milestones
}
if ctxUser.IsOrganization() && ctx.Org.Team != nil {
repoOpts.TeamID = ctx.Org.Team.ID
}
var (
userRepoCond = repo_model.SearchRepositoryCondition(&repoOpts) // all repo condition user could visit
repoCond = userRepoCond
repoIDs []int64
reposQuery = ctx.FormString("repos")
isShowClosed = ctx.FormString("state") == "closed"
sortType = ctx.FormString("sort")
page = ctx.FormInt("page")
keyword = ctx.FormTrim("q")
)
if page <= 1 {
page = 1
}
if len(reposQuery) != 0 {
if issueReposQueryPattern.MatchString(reposQuery) {
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// for each comma-delimited ID, parse it to int64 and append it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
// If the repo id specified by query is not parseable or not accessible by user, just ignore it.
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
if len(repoIDs) > 0 {
// Don't just set repoCond = builder.In("id", repoIDs): the user may have no permission
// on some of those IDs, whereas the original repoCond already restricts results to accessible repos.
repoCond = repoCond.And(builder.In("id", repoIDs))
}
} else {
log.Warn("issueReposQueryPattern not match with query")
}
}
counts, err := issues_model.CountMilestonesByRepoCondAndKw(userRepoCond, keyword, isShowClosed)
if err != nil {
ctx.ServerError("CountMilestonesByRepoIDs", err)
return
}
milestones, err := issues_model.SearchMilestones(repoCond, page, isShowClosed, sortType, keyword)
if err != nil {
ctx.ServerError("SearchMilestones", err)
return
}
showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false)
if err != nil {
ctx.ServerError("SearchRepositoryByCondition", err)
return
}
sort.Sort(showRepos)
for i := 0; i < len(milestones); {
for _, repo := range showRepos {
if milestones[i].RepoID == repo.ID {
milestones[i].Repo = repo
break
}
}
if milestones[i].Repo == nil {
log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID)
milestones = append(milestones[:i], milestones[i+1:]...)
continue
}
milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{
URLPrefix: milestones[i].Repo.Link(),
Metas: milestones[i].Repo.ComposeMetas(),
Ctx: ctx,
}, milestones[i].Content)
if err != nil {
ctx.ServerError("RenderString", err)
return
}
if milestones[i].Repo.IsTimetrackerEnabled(ctx) {
err := milestones[i].LoadTotalTrackedTime()
if err != nil {
ctx.ServerError("LoadTotalTrackedTime", err)
return
}
}
i++
}
milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(repoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
var totalMilestoneStats *issues_model.MilestonesStats
if len(repoIDs) == 0 {
totalMilestoneStats = milestoneStats
} else {
totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(userRepoCond, keyword)
if err != nil {
ctx.ServerError("GetMilestoneStats", err)
return
}
}
showRepoIds := make(container.Set[int64], len(showRepos))
for _, repo := range showRepos {
if repo.ID > 0 {
showRepoIds.Add(repo.ID)
}
}
if len(repoIDs) == 0 {
repoIDs = showRepoIds.Values()
}
repoIDs = util.SliceRemoveAllFunc(repoIDs, func(v int64) bool {
return !showRepoIds.Contains(v)
})
var pagerCount int
if isShowClosed {
ctx.Data["State"] = "closed"
ctx.Data["Total"] = totalMilestoneStats.ClosedCount
pagerCount = int(milestoneStats.ClosedCount)
} else {
ctx.Data["State"] = "open"
ctx.Data["Total"] = totalMilestoneStats.OpenCount
pagerCount = int(milestoneStats.OpenCount)
}
ctx.Data["Milestones"] = milestones
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = counts
ctx.Data["MilestoneStats"] = milestoneStats
ctx.Data["SortType"] = sortType
ctx.Data["Keyword"] = keyword
ctx.Data["RepoIDs"] = repoIDs
ctx.Data["IsShowClosed"] = isShowClosed
pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "repos", "RepoIDs")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplMilestones)
}
// Pulls renders the user's pull request overview page
func Pulls(ctx *context.Context) {
if unit.TypePullRequests.UnitGlobalDisabled() {
log.Debug("Pull request overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("pull_requests")
ctx.Data["PageIsPulls"] = true
ctx.Data["SingleRepoAction"] = "pull"
buildIssueOverview(ctx, unit.TypePullRequests)
}
// Issues renders the user's issues overview page
func Issues(ctx *context.Context) {
if unit.TypeIssues.UnitGlobalDisabled() {
log.Debug("Issues overview page not available as it is globally disabled.")
ctx.Status(http.StatusNotFound)
return
}
ctx.Data["Title"] = ctx.Tr("issues")
ctx.Data["PageIsIssues"] = true
ctx.Data["SingleRepoAction"] = "issue"
buildIssueOverview(ctx, unit.TypeIssues)
}
// Regexp for repos query
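// Example inputs: "[1,2,3]" and "[1,2,3,]" match; "[]", "1,2,3", and "[a,b]" do not.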
var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`)
func | (ctx *context.Context, unitType unit.Type) {
// ----------------------------------------------------
// Determine user; can be either user or organization.
// Return with NotFound or ServerError if unsuccessful.
// ----------------------------------------------------
ctxUser := getDashboardContextUser(ctx)
if ctx.Written() {
return
}
var (
viewType string
sortType = ctx.FormString("sort")
filterMode int
)
// Default to recently updated, unlike repository issues list
if sortType == "" {
sortType = "recentupdate"
}
// --------------------------------------------------------------------------------
// Distinguish User from Organization.
// Org:
// - Remember pre-determined viewType string for later. Will be posted to ctx.Data.
// Organization does not have view type and filter mode.
// User:
// - Use ctx.FormString("type") to determine filterMode.
// The type is set when clicking for example "assigned to me" on the overview page.
// - Remember either this or a fallback. Will be posted to ctx.Data.
// --------------------------------------------------------------------------------
// TODO: distinguish during routing
viewType = ctx.FormString("type")
switch viewType {
case "assigned":
filterMode = issues_model.FilterModeAssign
case "created_by":
filterMode = issues_model.FilterModeCreate
case "mentioned":
filterMode = issues_model.FilterModeMention
case "review_requested":
filterMode = issues_model.FilterModeReviewRequested
case "reviewed_by":
filterMode = issues_model.FilterModeReviewed
case "your_repositories":
fallthrough
default:
filterMode = issues_model.FilterModeYourRepositories
viewType = "your_repositories"
}
// --------------------------------------------------------------------------
// Build opts (IssuesOptions), which contains filter information.
// Will eventually be used to retrieve issues relevant for the overview page.
// Note: Non-final states of opts are used in-between, namely for:
// - Keyword search
// - Count Issues by repo
// --------------------------------------------------------------------------
// Get repository IDs where User/Org/Team has access.
var team *organization.Team
var org *organization.Organization
if ctx.Org != nil {
org = ctx.Org.Organization
team = ctx.Org.Team
}
isPullList := unitType == unit.TypePullRequests
opts := &issues_model.IssuesOptions{
IsPull: util.OptionalBoolOf(isPullList),
SortType: sortType,
IsArchived: util.OptionalBoolFalse,
Org: org,
Team: team,
User: ctx.Doer,
}
// Search all repositories which
//
// As user:
// - Owns the repository.
// - Have collaborator permissions in repository.
//
// As org:
// - Owns the repository.
//
// As team:
// - Team's org owns the repository.
// - Team has read permission to repository.
repoOpts := &repo_model.SearchRepoOptions{
Actor: ctx.Doer,
OwnerID: ctxUser.ID,
Private: true,
AllPublic: false,
AllLimited: false,
Collaborate: util.OptionalBoolNone,
UnitType: unitType,
Archived: util.OptionalBoolFalse,
}
if team != nil {
repoOpts.TeamID = team.ID
}
accessibleRepos := container.Set[int64]{}
{
ids, _, err := repo_model.SearchRepositoryIDs(repoOpts)
if err != nil {
ctx.ServerError("SearchRepositoryIDs", err)
return
}
accessibleRepos.AddMultiple(ids...)
opts.RepoIDs = ids
if len(opts.RepoIDs) == 0 {
// no repos found, don't let the indexer return all repos
opts.RepoIDs = []int64{0}
}
}
switch filterMode {
case issues_model.FilterModeAll:
case issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
opts.AssigneeID = ctx.Doer.ID
case issues_model.FilterModeCreate:
opts.PosterID = ctx.Doer.ID
case issues_model.FilterModeMention:
opts.MentionedID = ctx.Doer.ID
case issues_model.FilterModeReviewRequested:
opts.ReviewRequestedID = ctx.Doer.ID
case issues_model.FilterModeReviewed:
opts.ReviewedID = ctx.Doer.ID
}
// keyword holds the search term entered into the search field.
keyword := strings.Trim(ctx.FormString("q"), " ")
ctx.Data["Keyword"] = keyword
// Educated guess: Do or don't show closed issues.
isShowClosed := ctx.FormString("state") == "closed"
opts.IsClosed = util.OptionalBoolOf(isShowClosed)
// Filter repos and count issues in them. Count will be used later.
// USING NON-FINAL STATE OF opts FOR A QUERY.
issueCountByRepo, err := issue_indexer.CountIssuesByRepo(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("CountIssuesByRepo", err)
return
}
// Make sure page number is at least 1. Will be posted to ctx.Data.
page := ctx.FormInt("page")
if page <= 1 {
page = 1
}
opts.Paginator = &db.ListOptions{
Page: page,
PageSize: setting.UI.IssuePagingNum,
}
// Get IDs for labels (a filter option for issues/pulls).
// Required for IssuesOptions.
var labelIDs []int64
selectedLabels := ctx.FormString("labels")
if len(selectedLabels) > 0 && selectedLabels != "0" {
var err error
labelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
if err != nil {
ctx.ServerError("StringsToInt64s", err)
return
}
}
opts.LabelIDs = labelIDs
// Parse ctx.FormString("repos") and remember matched repo IDs for later.
// Gets set when clicking filters on the issues overview page.
selectedRepoIDs := getRepoIDs(ctx.FormString("repos"))
// Remove repo IDs that are not accessible to the user.
selectedRepoIDs = util.SliceRemoveAllFunc(selectedRepoIDs, func(v int64) bool {
return !accessibleRepos.Contains(v)
})
if len(selectedRepoIDs) > 0 {
opts.RepoIDs = selectedRepoIDs
}
// ------------------------------
// Get issues as defined by opts.
// ------------------------------
// Slice of Issues that will be displayed on the overview page
// USING FINAL STATE OF opts FOR A QUERY.
var issues issues_model.IssueList
{
issueIDs, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts))
if err != nil {
ctx.ServerError("issueIDsFromSearch", err)
return
}
issues, err = issues_model.GetIssuesByIDs(ctx, issueIDs, true)
if err != nil {
ctx.ServerError("GetIssuesByIDs", err)
return
}
}
// ----------------------------------
// Add repository pointers to Issues.
// ----------------------------------
// Remove repositories that should not be shown,
// which are repositories that have no issues and are not selected by the user.
selectedRepos := container.SetOf(selectedRepoIDs...)
for k, v := range issueCountByRepo {
if v == 0 && !selectedRepos.Contains(k) {
delete(issueCountByRepo, k)
}
}
// showReposMap maps repository IDs to their Repository pointers.
showReposMap, err := loadRepoByIDs(ctxUser, issueCountByRepo, unitType)
if err != nil {
if repo_model.IsErrRepoNotExist(err) {
ctx.NotFound("GetRepositoryByID", err)
return
}
ctx.ServerError("loadRepoByIDs", err)
return
}
// a RepositoryList
showRepos := repo_model.RepositoryListOfMap(showReposMap)
sort.Sort(showRepos)
// maps pull request IDs to their CommitStatus. Will be posted to ctx.Data.
for _, issue := range issues {
if issue.Repo == nil {
issue.Repo = showReposMap[issue.RepoID]
}
}
commitStatuses, lastStatus, err := pull_service.GetIssuesAllCommitStatus(ctx, issues)
if err != nil {
ctx.ServerError("GetIssuesLastCommitStatus", err)
return
}
// -------------------------------
// Fill stats to post to ctx.Data.
// -------------------------------
issueStats, err := getUserIssueStats(ctx, filterMode, issue_indexer.ToSearchOptions(keyword, opts), ctx.Doer.ID)
if err != nil {
ctx.ServerError("getUserIssueStats", err)
return
}
// Will be posted to ctx.Data.
var shownIssues int
if !isShowClosed {
shownIssues = int(issueStats.OpenCount)
} else {
shownIssues = int(issueStats.ClosedCount)
}
if len(opts.RepoIDs) != 0 {
shownIssues = 0
for _, repoID := range opts.RepoIDs {
shownIssues += int(issueCountByRepo[repoID])
}
}
var allIssueCount int64
for _, issueCount := range issueCountByRepo {
allIssueCount += issueCount
}
ctx.Data["TotalIssueCount"] = allIssueCount
if len(opts.RepoIDs) == 1 {
repo := showReposMap[opts.RepoIDs[0]]
if repo != nil {
ctx.Data["SingleRepoLink"] = repo.Link()
}
}
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["IssueRefEndNames"], ctx.Data["IssueRefURLs"] = issue_service.GetRefEndNamesAndURLs(issues, ctx.FormString("RepoLink"))
if err := issues.LoadAttributes(ctx); err != nil {
ctx.ServerError("issues.LoadAttributes", err)
return
}
ctx.Data["Issues"] = issues
approvalCounts, err := issues.GetApprovalCounts(ctx)
if err != nil {
ctx.ServerError("ApprovalCounts", err)
return
}
ctx.Data["ApprovalCounts"] = func(issueID int64, typ string) int64 {
counts, ok := approvalCounts[issueID]
if !ok || len(counts) == 0 {
return 0
}
reviewTyp := issues_model.ReviewTypeApprove
if typ == "reject" {
reviewTyp = issues_model.ReviewTypeReject
} else if typ == "waiting" {
reviewTyp = issues_model.ReviewTypeRequest
}
for _, count := range counts {
if count.Type == reviewTyp {
return count.Count
}
}
return 0
}
ctx.Data["CommitLastStatus"] = lastStatus
ctx.Data["CommitStatuses"] = commitStatuses
ctx.Data["Repos"] = showRepos
ctx.Data["Counts"] = issueCountByRepo
ctx.Data["IssueStats"] = issueStats
ctx.Data["ViewType"] = viewType
ctx.Data["SortType"] = sortType
ctx.Data["RepoIDs"] = selectedRepoIDs
ctx.Data["IsShowClosed"] = isShowClosed
ctx.Data["SelectLabels"] = selectedLabels
if isShowClosed {
ctx.Data["State"] = "closed"
} else {
ctx.Data["State"] = "open"
}
// Convert []int64 to string
reposParam, _ := json.Marshal(opts.RepoIDs)
ctx.Data["ReposParam"] = string(reposParam)
pager := context.NewPagination(shownIssues, setting.UI.IssuePagingNum, page, 5)
pager.AddParam(ctx, "q", "Keyword")
pager.AddParam(ctx, "type", "ViewType")
pager.AddParam(ctx, "repos", "ReposParam")
pager.AddParam(ctx, "sort", "SortType")
pager.AddParam(ctx, "state", "State")
pager.AddParam(ctx, "labels", "SelectLabels")
pager.AddParam(ctx, "milestone", "MilestoneID")
pager.AddParam(ctx, "assignee", "AssigneeID")
ctx.Data["Page"] = pager
ctx.HTML(http.StatusOK, tplIssues)
}
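// getRepoIDs parses a repos query such as "[1,23,0,5]" into []int64{1, 23, 5};
// zero entries, unparsable entries, and malformed queries are dropped.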
func getRepoIDs(reposQuery string) []int64 {
if len(reposQuery) == 0 || reposQuery == "[]" {
return []int64{}
}
if !issueReposQueryPattern.MatchString(reposQuery) {
log.Warn("issueReposQueryPattern does not match query: %q", reposQuery)
return []int64{}
}
var repoIDs []int64
// remove "[" and "]" from string
reposQuery = reposQuery[1 : len(reposQuery)-1]
// for each comma-delimited ID, parse it to int64 and append it to repoIDs
for _, rID := range strings.Split(reposQuery, ",") {
// Ensure nonempty string entries
if rID != "" && rID != "0" {
rIDint64, err := strconv.ParseInt(rID, 10, 64)
if err == nil {
repoIDs = append(repoIDs, rIDint64)
}
}
}
return repoIDs
}
func loadRepoByIDs(ctxUser *user_model.User, issueCountByRepo map[int64]int64, unitType unit.Type) (map[int64]*repo_model.Repository, error) {
totalRes := make(map[int64]*repo_model.Repository, len(issueCountByRepo))
repoIDs := make([]int64, 0, 500)
for id := range issueCountByRepo {
if id <= 0 {
continue
}
repoIDs = append(repoIDs, id)
if len(repoIDs) == 500 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
repoIDs = repoIDs[:0]
}
}
if len(repoIDs) > 0 {
if err := repo_model.FindReposMapByIDs(repoIDs, totalRes); err != nil {
return nil, err
}
}
return totalRes, nil
}
// ShowSSHKeys output all the ssh keys of user by uid
func ShowSSHKeys(ctx *context.Context) {
keys, err := asymkey_model.ListPublicKeys(ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListPublicKeys", err)
return
}
var buf bytes.Buffer
for i := range keys {
buf.WriteString(keys[i].OmitEmail())
buf.WriteString("\n")
}
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
// ShowGPGKeys output all the public GPG keys of user by uid
func ShowGPGKeys(ctx *context.Context) {
keys, err := asymkey_model.ListGPGKeys(ctx, ctx.ContextUser.ID, db.ListOptions{})
if err != nil {
ctx.ServerError("ListGPGKeys", err)
return
}
entities := make([]*openpgp.Entity, 0)
failedEntitiesID := make([]string, 0)
for _, k := range keys {
e, err := asymkey_model.GPGKeyToEntity(k)
if err != nil {
if asymkey_model.IsErrGPGKeyImportNotExist(err) {
failedEntitiesID = append(failedEntitiesID, k.KeyID)
continue // Skip keys imported without a backup of the armored key
}
ctx.ServerError("ShowGPGKeys", err)
return
}
entities = append(entities, e)
}
var buf bytes.Buffer
headers := make(map[string]string)
if len(failedEntitiesID) > 0 { // If some keys need re-import to be exported
headers["Note"] = fmt.Sprintf("The keys with the following IDs couldn't be exported and need to be reuploaded %s", strings.Join(failedEntitiesID, ", "))
} else if len(entities) == 0 {
headers["Note"] = "This user hasn't uploaded any GPG keys."
}
writer, _ := armor.Encode(&buf, "PGP PUBLIC KEY BLOCK", headers)
for _, e := range entities {
err = e.Serialize(writer) // TODO: find out why keys are exported with a different cipherTypeByte than the original (should not be blocking, but strange)
if err != nil {
ctx.ServerError("ShowGPGKeys", err)
return
}
}
writer.Close()
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
func UsernameSubRoute(ctx *context.Context) {
// WORKAROUND to support usernames containing "."
// https://github.com/go-chi/chi/issues/781
username := ctx.Params("username")
reloadParam := func(suffix string) (success bool) {
ctx.SetParams("username", strings.TrimSuffix(username, suffix))
context_service.UserAssignmentWeb()(ctx)
return !ctx.Written()
}
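// For example, a request for "alice.keys" re-resolves the user as "alice"
// and then serves that user's SSH keys via ShowSSHKeys below.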
switch {
case strings.HasSuffix(username, ".png"):
if reloadParam(".png") {
AvatarByUserName(ctx)
}
case strings.HasSuffix(username, ".keys"):
if reloadParam(".keys") {
ShowSSHKeys(ctx)
}
case strings.HasSuffix(username, ".gpg"):
if reloadParam(".gpg") {
ShowGPGKeys(ctx)
}
case strings.HasSuffix(username, ".rss"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".rss") {
context_service.UserAssignmentWeb()(ctx)
feed.ShowUserFeedRSS(ctx)
}
case strings.HasSuffix(username, ".atom"):
if !setting.Other.EnableFeed {
ctx.Error(http.StatusNotFound)
return
}
if reloadParam(".atom") {
feed.ShowUserFeedAtom(ctx)
}
default:
context_service.UserAssignmentWeb()(ctx)
if !ctx.Written() {
ctx.Data["EnableFeed"] = setting.Other.EnableFeed
OwnerProfile(ctx)
}
}
}
func getUserIssueStats(ctx *context.Context, filterMode int, opts *issue_indexer.SearchOptions, doerID int64) (*issues_model.IssueStats, error) {
opts = opts.Copy(func(o *issue_indexer.SearchOptions) {
o.AssigneeID = nil
o.PosterID = nil
o.MentionID = nil
o.ReviewRequestedID = nil
o.ReviewedID = nil
})
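// The doer-scoped filters are cleared above so that each count below can
// re-apply exactly one of them against the same base query.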
var (
err error
ret = &issues_model.IssueStats{}
)
{
openClosedOpts := opts.Copy()
switch filterMode {
case issues_model.FilterModeAll, issues_model.FilterModeYourRepositories:
case issues_model.FilterModeAssign:
openClosedOpts.AssigneeID = &doerID
case issues_model.FilterModeCreate:
openClosedOpts.PosterID = &doerID
case issues_model.FilterModeMention:
openClosedOpts.MentionID = &doerID
case issues_model.FilterModeReviewRequested:
openClosedOpts.ReviewRequestedID = &doerID
case issues_model.FilterModeReviewed:
openClosedOpts.ReviewedID = &doerID
}
openClosedOpts.IsClosed = util.OptionalBoolFalse
ret.OpenCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
openClosedOpts.IsClosed = util.OptionalBoolTrue
ret.ClosedCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
if err != nil {
return nil, err
}
}
ret.YourRepositoriesCount, err = issue_indexer.CountIssues(ctx, opts)
if err != nil {
return nil, err
}
ret.AssignCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AssigneeID = &doerID }))
if err != nil {
return nil, err
}
ret.CreateCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.PosterID = &doerID }))
if err != nil {
return nil, err
}
ret.MentionCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.MentionID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewRequestedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewRequestedID = &doerID }))
if err != nil {
return nil, err
}
ret.ReviewedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewedID = &doerID }))
if err != nil {
return nil, err
}
return ret, nil
}
| buildIssueOverview | identifier_name |
lib.rs | #![allow(unused_imports)]
#[macro_use]
extern crate log;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use bytes::{BufMut, BytesMut};
use tokio::net::TcpStream;
use byteorder::{ByteOrder, BigEndian};
use futures::try_ready;
use futures::future::Either;
use tokio::prelude::*;
use chrono::naive::NaiveDateTime;
mod commcheck;
pub use commcheck::CommCheck;
#[derive(Clone)]
pub struct SprinklerOptions {
pub heart_beat: u64,
pub retry_delay: u64,
pub master_addr: String,
pub _id: usize,
pub _hostname: String,
}
impl Default for SprinklerOptions {
fn default() -> Self {
SprinklerOptions {
heart_beat: 3,
retry_delay: 20,
master_addr: String::from("localhost"),
_id: 0,
_hostname: String::from("localhost")
}
}
}
/// Sprinkler Builder
pub struct SprinklerBuilder {
params: SprinklerOptions,
counter: usize
}
impl SprinklerBuilder {
pub fn new(params: SprinklerOptions) -> Self {
SprinklerBuilder {
params,
counter: 0
}
}
}
impl SprinklerBuilder {
pub fn build<T: Sprinkler>(&mut self, hostname: String) -> T {
let next = self.counter;
self.counter += 1;
T::build(SprinklerOptions {
_id: next,
_hostname: hostname,
..self.params.clone()
})
}
}
#[cfg(feature = "master")]
type EncryptedStream = tokio_tls::TlsStream<TcpStream>;
/// A TCP stream adapter to convert between byte stream and objects
#[cfg(feature = "master")]
#[derive(Debug)]
pub struct SprinklerProto {
socket: EncryptedStream,
read_buffer: BytesMut,
}
#[cfg(feature = "master")]
impl SprinklerProto {
pub fn new(socket: EncryptedStream) -> Self {
SprinklerProto {
socket,
read_buffer: BytesMut::new(),
}
}
/// Update read buffer
fn check(&mut self) -> Poll<(), std::io::Error> {
loop { // Keep reading until the socket would block (NotReady) or has been closed (n == 0)
self.read_buffer.reserve(512);
let n = try_ready!(self.socket.read_buf(&mut self.read_buffer));
if n == 0 {
return Ok(Async::Ready(()));
}
}
}
}
/// Encode a message and place it in a write buffer
pub fn compose_message(from: usize, msg: String) -> BytesMut {
let mut write_buffer = BytesMut::new();
write_buffer.reserve(512);
write_buffer.put_u16_be(from as u16);
write_buffer.put_i64_be(chrono::Local::now().timestamp());
write_buffer.put_u16_be(msg.len() as u16);
write_buffer.put(msg);
write_buffer
}
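// Wire layout produced by `compose_message` and parsed by `SprinklerProto::poll`:
// a fixed 12-byte header (sender id: u16 BE, unix timestamp: i64 BE, body length: u16 BE)
// followed by the UTF-8 message body.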
/// Message header
#[derive(Clone, Debug)]
pub struct SprinklerProtoHeader {
id: u16,
timestamp: i64,
len: u16
}
#[cfg(feature = "master")]
impl Stream for SprinklerProto {
type Item = SprinklerProtoHeader;
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let sock_closed = self.check()?.is_ready();
if self.read_buffer.len() > 12 {
Ok(Async::Ready(Some(SprinklerProtoHeader {
id: BigEndian::read_u16(&self.read_buffer.split_to(2)),
timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64,
len: BigEndian::read_u16(&self.read_buffer.split_to(2))
})))
}
else {
if sock_closed { Ok(Async::Ready(None)) }
else { Ok(Async::NotReady) }
}
}
}
#[derive(Clone)]
pub enum Transmitter<T> {
/// Synchronous Sender
Synchronous(std::sync::mpsc::Sender<T>),
/// Asynchronous Sender | pub fn send(&self, t: T) -> Result<(), ()> {
match self {
Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()),
Transmitter::Asynchronous(sender) => {
tokio::spawn({
let sender = sender.clone();
sender.send(t).into_future().map(|_| ()).map_err(|_| ())
});
Ok(())
}
}
}
}
#[derive(Clone)]
pub struct Switch {
pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>>
}
impl Switch {
pub fn new() -> Self {
Switch { inner: Arc::new(Mutex::new(HashMap::new())) }
}
pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) {
let mut switch_init = self.inner.lock().unwrap();
for i in sprinklers {
match i.activate_master() {
ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); },
ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); }
}
}
}
}
/// Message relay between master threads and TCP sockets connected to remote agents
#[cfg(feature = "master")]
pub struct SprinklerRelay {
pub proto: SprinklerProto,
pub header: SprinklerProtoHeader,
pub switch: Switch
}
#[cfg(feature = "master")]
impl Future for SprinklerRelay {
type Item = ();
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let sock_closed = self.proto.check()?.is_ready();
if self.proto.read_buffer.len() >= self.header.len as usize {
if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) {
if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) {
if let Err(_) = tx.send(Message{
timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0),
body: msgbody
}) {
warn!("Failed to relay the message.");
}
}
Ok(Async::Ready(()))
}
else {
warn!("Failed to decode message.");
Ok(Async::Ready(()))
}
}
else {
if sock_closed {
warn!("Message was lost.");
Ok(Async::Ready(()))
}
else { Ok(Async::NotReady) }
}
}
}
pub enum ActivationResult {
/// A realtime algorithm based master thread that monitors agent threads
RealtimeMonitor(std::sync::mpsc::Sender<Message>),
/// An asynchronous master thread that monitors agent threads
AsyncMonitor(futures::sync::mpsc::Sender<Message>)
}
/// DoS prevention mechanisms, which consist of distributed agent threads monitored by master threads, identifiable by a systemwide id.
/// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads,
/// so that there will not be a single point of failure.
/// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies.
/// The systemwide configuration is done by replicating the same config file and executable.
pub trait Sprinkler {
/// Build a new sprinkler
fn build(options: SprinklerOptions) -> Self where Self: Sized;
/// Get systemwide id
fn id(&self) -> usize;
/// Get the hostname, where the agent would be deployed
fn hostname(&self) -> &str;
/// Start the master thread, returning a sender (to the master thread) on a intraprocess communication channel
fn activate_master(&self) -> ActivationResult;
/// Start the agent thread
fn activate_agent(&self);
/// Kill the master thread. Note: there is no way to reach out and kill any agent threads.
fn deactivate(&self);
}
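// Illustrative sketch only: a do-nothing implementation showing the shape of the
// trait. The bundled `CommCheck` is the real reference implementation; every name
// below other than the trait items themselves is hypothetical.
pub struct NoopSprinkler {
    options: SprinklerOptions,
}

impl Sprinkler for NoopSprinkler {
    fn build(options: SprinklerOptions) -> Self {
        NoopSprinkler { options }
    }
    fn id(&self) -> usize {
        self.options._id
    }
    fn hostname(&self) -> &str {
        &self.options._hostname
    }
    fn activate_master(&self) -> ActivationResult {
        // Master side: hand back a synchronous channel and log whatever the agent reports.
        let (tx, rx) = std::sync::mpsc::channel::<Message>();
        std::thread::spawn(move || {
            for msg in rx {
                info!("noop sprinkler received: {}", msg.body);
            }
        });
        ActivationResult::RealtimeMonitor(tx)
    }
    fn activate_agent(&self) {
        // Agent side: a real implementation would watch the host and report anomalies back.
    }
    fn deactivate(&self) {
        // Nothing to stop in this sketch.
    }
}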
/// Sprinkler thread level message format
#[derive(Clone)]
pub struct Message {
pub timestamp: NaiveDateTime,
pub body: String
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Anomaly {
Negative, // No anomaly has been detected
Positive, // Anomaly has occurred
Fixing(usize), // Has attempted to intervene N times
OutOfControl // Has given up trying because the programmed strategy will not work
}
impl Anomaly {
pub fn get_retry_unchecked(&self) -> usize {
match self {
Anomaly::Negative | Anomaly::Positive => 0,
Anomaly::Fixing(n) => *n,
Anomaly::OutOfControl => std::usize::MAX
}
}
pub fn escalate(&self, max_retry: usize) -> AnomalyTransition {
match self {
Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(),
Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(),
Anomaly::Fixing(n) => if *n < max_retry {
AnomalyTransition::Fixing
} else {
(*self >> Anomaly::OutOfControl).unwrap()
},
Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(),
}
}
pub fn diminish(&self) -> AnomalyTransition {
(*self >> Anomaly::Negative).unwrap()
}
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum AnomalyTransition {
Normal, // Negative -> Negative
Occurred, // Negative -> Positive
Unhandled, // Positive -> Positive
Disappeared, // Positive | OutOfControl -> Negative
Fixed, // Fixing(_) -> Negative
Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1)
GaveUp, // Fixing(m) -> OutOfControl
HasGivenUp // OutOfControl -> OutOfControl
}
use std::ops::Shr;
use std::ops::ShrAssign;
impl Shr for Anomaly {
type Output = Option<AnomalyTransition>;
fn shr(self, rhs: Self) -> Option<AnomalyTransition> {
match (self, rhs) {
(Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal),
(Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred),
(Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled),
(Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed),
(Anomaly::Fixing(_), Anomaly::OutOfControl) => Some(AnomalyTransition::GaveUp),
(Anomaly::OutOfControl, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::OutOfControl, Anomaly::OutOfControl) => Some(AnomalyTransition::HasGivenUp),
_ => None
}
}
}
impl Shr<AnomalyTransition> for Anomaly {
type Output = Anomaly;
fn shr(self, rhs: AnomalyTransition) -> Anomaly {
match (self, rhs) {
(Anomaly::Negative, AnomalyTransition::Occurred) => Anomaly::Positive,
(Anomaly::Positive, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::OutOfControl, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::Fixing(_), AnomalyTransition::Fixed) => Anomaly::Negative,
(Anomaly::Positive, AnomalyTransition::Fixing) => Anomaly::Fixing(1),
(Anomaly::Fixing(n), AnomalyTransition::Fixing) => Anomaly::Fixing(n+1),
(Anomaly::Fixing(_), AnomalyTransition::GaveUp) => Anomaly::OutOfControl,
_ => self
}
}
}
impl ShrAssign<AnomalyTransition> for Anomaly {
fn shr_assign(&mut self, rhs: AnomalyTransition) {
let next = *self >> rhs;
*self = next;
}
}
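// Worked example of the transition algebra above (illustrative only), assuming a
// sprinkler configured with max_retry = 2:
// Negative -> Positive -> Fixing(1) -> Fixing(2) -> OutOfControl -> Negative.
fn _anomaly_walkthrough() {
    let mut state = Anomaly::Negative;
    state = state >> state.escalate(2); // Occurred:  Negative  -> Positive
    state = state >> state.escalate(2); // Fixing:    Positive  -> Fixing(1)
    state = state >> state.escalate(2); // Fixing:    Fixing(1) -> Fixing(2)
    state = state >> state.escalate(2); // GaveUp:    Fixing(2) -> OutOfControl
    assert_eq!(state, Anomaly::OutOfControl);
    let recovery = state.diminish();    // Disappeared
    state >>= recovery;                 // OutOfControl -> Negative
    assert_eq!(state, Anomaly::Negative);
}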
/// Create a TLS acceptor
#[cfg(feature = "master")]
fn init_tls() -> native_tls::Result<tokio_tls::TlsAcceptor> {
let der = include_bytes!("/etc/sprinkler.conf.d/master.p12");
// TODO key loading is hard coded.
let mut keybuffer = Vec::new();
std::fs::File::open("/root/.sprinkler.key").expect("cannot read key").read_to_end(&mut keybuffer).expect("cannot read key");
let cert = native_tls::Identity::from_pkcs12(der, &String::from_utf8_lossy(&keybuffer))?;
Ok(tokio_tls::TlsAcceptor::from(native_tls::TlsAcceptor::builder(cert).build()?))
}
/// Starts a tokio server bound to the specified address
#[cfg(feature = "master")]
pub fn server(addr: &std::net::SocketAddr, switch: &Switch) {
/*
Self-signed cert
openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out sprinkler.crt -keyout sprinkler.key
openssl pkcs12 -export -out identity.p12 -inkey sprinkler.key -in sprinkler.crt
echo "$KEY_PASSWORD" | tr -d '\n' > identity.txt
chown root:root identity.txt
chmod 600 identity.txt
*/
if let Ok(tls_acceptor) = init_tls() {
let listener = tokio::net::TcpListener::bind(addr).expect("unable to bind TCP listener");
let server = listener.incoming()
.map_err(|e| eprintln!("accept failed = {:?}", e))
.for_each({ let switch = switch.clone(); move |s| {
let tls_accept = tls_acceptor
.accept(s)
.and_then({ let switch = switch.clone(); move |s| {
let proto = SprinklerProto::new(s);
let handle_conn = proto.into_future()
.map_err(|(e, _)| e)
.and_then({ let switch = switch.clone(); move |(header, proto)| {
match header {
Some(header) => Either::A(SprinklerRelay{ proto, header, switch }),
None => Either::B(future::ok(())) // Connection dropped?
}
}})
// Task futures have an error of type `()`; this ensures we handle the
// error, which we do here by logging it.
.map_err(|e| {
error!("connection error = {:?}", e);
});
tokio::spawn(handle_conn);
Ok(())
}})
.map_err(|err| {
debug!("TLS accept error: {:?}", err);
});
tokio::spawn(tls_accept)
}});
tokio::spawn(server);
}
else {
error!("cannot initialize tls");
}
}
/// Activates sprinkler agents based on hostname
pub fn agent<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(sprinklers: I) {
if let Ok(hostname) = sys_info::hostname() {
for i in sprinklers.into_iter().filter(|&i| i.hostname() == hostname) {
i.activate_agent();
info!("sprinkler[{}] activated.", i.id());
}
}
else {
error!("Cannot obtain hostname.");
std::process::exit(-1);
}
}
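// Hypothetical end-to-end wiring (not part of this crate): build sprinklers from one
// shared config, start the master-side monitors and TLS server, then activate whichever
// agents match this host. Assumes `CommCheck` implements `Sprinkler`; the address and
// hostnames are placeholders.
#[cfg(feature = "master")]
fn _example_wiring() {
    let addr: std::net::SocketAddr = "0.0.0.0:3777".parse().expect("bad listen address");
    tokio::run(futures::future::lazy(move || {
        let mut builder = SprinklerBuilder::new(SprinklerOptions::default());
        let sprinklers: Vec<Box<dyn Sprinkler>> = vec![
            Box::new(builder.build::<CommCheck>(String::from("host-1"))),
            Box::new(builder.build::<CommCheck>(String::from("host-2"))),
        ];
        let switch = Switch::new();
        switch.connect_all(&sprinklers); // start master threads, remember their senders
        server(&addr, &switch);          // accept TLS connections from remote agents
        agent(&sprinklers);              // activate agents whose hostname matches this machine
        Ok::<(), ()>(())
    }));
}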
pub fn loop_forever() -> ! {
loop { std::thread::sleep(std::time::Duration::from_secs(600)); }
} | Asynchronous(futures::sync::mpsc::Sender<T>)
}
impl<T> Transmitter<T> where T: 'static + Send {
/// Send a message through the underlying Sender | random_line_split |
lib.rs | #![allow(unused_imports)]
#[macro_use]
extern crate log;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use bytes::{BufMut, BytesMut};
use tokio::net::TcpStream;
use byteorder::{ByteOrder, BigEndian};
use futures::try_ready;
use futures::future::Either;
use tokio::prelude::*;
use chrono::naive::NaiveDateTime;
mod commcheck;
pub use commcheck::CommCheck;
#[derive(Clone)]
pub struct SprinklerOptions {
pub heart_beat: u64,
pub retry_delay: u64,
pub master_addr: String,
pub _id: usize,
pub _hostname: String,
}
impl Default for SprinklerOptions {
fn default() -> Self {
SprinklerOptions {
heart_beat: 3,
retry_delay: 20,
master_addr: String::from("localhost"),
_id: 0,
_hostname: String::from("localhost")
}
}
}
/// Sprinkler Builder
pub struct SprinklerBuilder {
params: SprinklerOptions,
counter: usize
}
impl SprinklerBuilder {
pub fn new(params: SprinklerOptions) -> Self {
SprinklerBuilder {
params,
counter: 0
}
}
}
impl SprinklerBuilder {
pub fn build<T: Sprinkler>(&mut self, hostname: String) -> T |
}
#[cfg(feature = "master")]
type EncryptedStream = tokio_tls::TlsStream<TcpStream>;
/// A TCP stream adapter to convert between byte stream and objects
#[cfg(feature = "master")]
#[derive(Debug)]
pub struct SprinklerProto {
socket: EncryptedStream,
read_buffer: BytesMut,
}
#[cfg(feature = "master")]
impl SprinklerProto {
pub fn new(socket: EncryptedStream) -> Self {
SprinklerProto {
socket,
read_buffer: BytesMut::new(),
}
}
/// Update read buffer
fn check(&mut self) -> Poll<(), std::io::Error> {
loop { // Keep reading until the socket would block (NotReady) or has been closed (n == 0)
self.read_buffer.reserve(512);
let n = try_ready!(self.socket.read_buf(&mut self.read_buffer));
if n == 0 {
return Ok(Async::Ready(()));
}
}
}
}
/// Encode a message and place it in a write buffer
pub fn compose_message(from: usize, msg: String) -> BytesMut {
let mut write_buffer = BytesMut::new();
write_buffer.reserve(512);
write_buffer.put_u16_be(from as u16);
write_buffer.put_i64_be(chrono::Local::now().timestamp());
write_buffer.put_u16_be(msg.len() as u16);
write_buffer.put(msg);
write_buffer
}
/// Message header
#[derive(Clone, Debug)]
pub struct SprinklerProtoHeader {
id: u16,
timestamp: i64,
len: u16
}
#[cfg(feature = "master")]
impl Stream for SprinklerProto {
type Item = SprinklerProtoHeader;
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let sock_closed = self.check()?.is_ready();
if self.read_buffer.len() > 12 {
Ok(Async::Ready(Some(SprinklerProtoHeader {
id: BigEndian::read_u16(&self.read_buffer.split_to(2)),
timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64,
len: BigEndian::read_u16(&self.read_buffer.split_to(2))
})))
}
else {
if sock_closed { Ok(Async::Ready(None)) }
else { Ok(Async::NotReady) }
}
}
}
#[derive(Clone)]
pub enum Transmitter<T> {
/// Synchronous Sender
Synchronous(std::sync::mpsc::Sender<T>),
/// Asynchronous Sender
Asynchronous(futures::sync::mpsc::Sender<T>)
}
impl<T> Transmitter<T> where T: 'static + Send {
/// Send a message through the underlying Sender
pub fn send(&self, t: T) -> Result<(), ()> {
match self {
Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()),
Transmitter::Asynchronous(sender) => {
tokio::spawn({
let sender = sender.clone();
sender.send(t).into_future().map(|_| ()).map_err(|_| ())
});
Ok(())
}
}
}
}
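// Small illustration (hypothetical): both transmitter flavors are driven through the
// same `send` call. The asynchronous arm calls `tokio::spawn`, so it must run inside
// an active tokio 0.1 runtime.
fn _transmitter_demo(sync_tx: std::sync::mpsc::Sender<Message>,
                     async_tx: futures::sync::mpsc::Sender<Message>) {
    let msg = Message {
        timestamp: chrono::Local::now().naive_local(),
        body: String::from("ping"),
    };
    let _ = Transmitter::Synchronous(sync_tx).send(msg.clone());
    let _ = Transmitter::Asynchronous(async_tx).send(msg);
}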
#[derive(Clone)]
pub struct Switch {
pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>>
}
impl Switch {
pub fn new() -> Self {
Switch { inner: Arc::new(Mutex::new(HashMap::new())) }
}
pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) {
let mut switch_init = self.inner.lock().unwrap();
for i in sprinklers {
match i.activate_master() {
ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); },
ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); }
}
}
}
}
/// Message relay between master threads and TCP sockets connected to remote agents
#[cfg(feature = "master")]
pub struct SprinklerRelay {
pub proto: SprinklerProto,
pub header: SprinklerProtoHeader,
pub switch: Switch
}
#[cfg(feature = "master")]
impl Future for SprinklerRelay {
type Item = ();
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let sock_closed = self.proto.check()?.is_ready();
if self.proto.read_buffer.len() >= self.header.len as usize {
if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) {
if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) {
if let Err(_) = tx.send(Message{
timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0),
body: msgbody
}) {
warn!("Failed to relay the message.");
}
}
Ok(Async::Ready(()))
}
else {
warn!("Failed to decode message.");
Ok(Async::Ready(()))
}
}
else {
if sock_closed {
warn!("Message was lost.");
Ok(Async::Ready(()))
}
else { Ok(Async::NotReady) }
}
}
}
pub enum ActivationResult {
/// A realtime algorithm based master thread that monitors agent threads
RealtimeMonitor(std::sync::mpsc::Sender<Message>),
/// An asynchronous master thread that monitors agent threads
AsyncMonitor(futures::sync::mpsc::Sender<Message>)
}
/// DoS prevention mechanisms, which consist of distributed agent threads monitored by master threads, identifiable by a systemwide id.
/// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads,
/// so that there will not be a single point of failure.
/// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies.
/// The systemwide configuration is done by replicating the same config file and executable.
pub trait Sprinkler {
/// Build a new sprinkler
fn build(options: SprinklerOptions) -> Self where Self: Sized;
/// Get systemwide id
fn id(&self) -> usize;
/// Get the hostname, where the agent would be deployed
fn hostname(&self) -> &str;
/// Start the master thread, returning a sender (to the master thread) on a intraprocess communication channel
fn activate_master(&self) -> ActivationResult;
/// Start the agent thread
fn activate_agent(&self);
/// Kill the master thread. Note: there is no way to reach out and kill any agent threads.
fn deactivate(&self);
}
/// Sprinkler thread level message format
#[derive(Clone)]
pub struct Message {
pub timestamp: NaiveDateTime,
pub body: String
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Anomaly {
Negative, // No anomaly has been detected
Positive, // Anomaly has occurred
Fixing(usize), // Has attempted to intervene N times
OutOfControl // Has given up trying because the programmed strategy will not work
}
impl Anomaly {
pub fn get_retry_unchecked(&self) -> usize {
match self {
Anomaly::Negative | Anomaly::Positive => 0,
Anomaly::Fixing(n) => *n,
Anomaly::OutOfControl => std::usize::MAX
}
}
pub fn escalate(&self, max_retry: usize) -> AnomalyTransition {
match self {
Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(),
Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(),
Anomaly::Fixing(n) => if *n < max_retry {
AnomalyTransition::Fixing
} else {
(*self >> Anomaly::OutOfControl).unwrap()
},
Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(),
}
}
pub fn diminish(&self) -> AnomalyTransition {
(*self >> Anomaly::Negative).unwrap()
}
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum AnomalyTransition {
Normal, // Negative -> Negative
Occurred, // Negative -> Positive
Unhandled, // Positive -> Positive
Disappeared, // Positive | OutOfControl -> Negative
Fixed, // Fixing(_) -> Negative
Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1)
GaveUp, // Fixing(m) -> OutOfControl
HasGivenUp // OutOfControl -> OutOfControl
}
use std::ops::Shr;
use std::ops::ShrAssign;
impl Shr for Anomaly {
type Output = Option<AnomalyTransition>;
fn shr(self, rhs: Self) -> Option<AnomalyTransition> {
match (self, rhs) {
(Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal),
(Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred),
(Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled),
(Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed),
(Anomaly::Fixing(_), Anomaly::OutOfControl) => Some(AnomalyTransition::GaveUp),
(Anomaly::OutOfControl, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::OutOfControl, Anomaly::OutOfControl) => Some(AnomalyTransition::HasGivenUp),
_ => None
}
}
}
impl Shr<AnomalyTransition> for Anomaly {
type Output = Anomaly;
fn shr(self, rhs: AnomalyTransition) -> Anomaly {
match (self, rhs) {
(Anomaly::Negative, AnomalyTransition::Occurred) => Anomaly::Positive,
(Anomaly::Positive, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::OutOfControl, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::Fixing(_), AnomalyTransition::Fixed) => Anomaly::Negative,
(Anomaly::Positive, AnomalyTransition::Fixing) => Anomaly::Fixing(1),
(Anomaly::Fixing(n), AnomalyTransition::Fixing) => Anomaly::Fixing(n+1),
(Anomaly::Fixing(_), AnomalyTransition::GaveUp) => Anomaly::OutOfControl,
_ => self
}
}
}
impl ShrAssign<AnomalyTransition> for Anomaly {
fn shr_assign(&mut self, rhs: AnomalyTransition) {
let next = *self >> rhs;
*self = next;
}
}
/// Create a TLS acceptor
#[cfg(feature = "master")]
fn init_tls() -> native_tls::Result<tokio_tls::TlsAcceptor> {
let der = include_bytes!("/etc/sprinkler.conf.d/master.p12");
// TODO key loading is hard coded.
let mut keybuffer = Vec::new();
std::fs::File::open("/root/.sprinkler.key").expect("cannot read key").read_to_end(&mut keybuffer).expect("cannot read key");
let cert = native_tls::Identity::from_pkcs12(der, &String::from_utf8_lossy(&keybuffer))?;
Ok(tokio_tls::TlsAcceptor::from(native_tls::TlsAcceptor::builder(cert).build()?))
}
/// Starts a tokio server bound to the specified address
#[cfg(feature = "master")]
pub fn server(addr: &std::net::SocketAddr, switch: &Switch) {
/*
Self-signed cert
openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out sprinkler.crt -keyout sprinkler.key
openssl pkcs12 -export -out identity.p12 -inkey sprinkler.key -in sprinkler.crt
echo "$KEY_PASSWORD" | tr -d '\n' > identity.txt
chown root:root identity.txt
chmod 600 identity.txt
*/
if let Ok(tls_acceptor) = init_tls() {
let listener = tokio::net::TcpListener::bind(addr).expect("unable to bind TCP listener");
let server = listener.incoming()
.map_err(|e| eprintln!("accept failed = {:?}", e))
.for_each({ let switch = switch.clone(); move |s| {
let tls_accept = tls_acceptor
.accept(s)
.and_then({ let switch = switch.clone(); move |s| {
let proto = SprinklerProto::new(s);
let handle_conn = proto.into_future()
.map_err(|(e, _)| e)
.and_then({ let switch = switch.clone(); move |(header, proto)| {
match header {
Some(header) => Either::A(SprinklerRelay{ proto, header, switch }),
None => Either::B(future::ok(())) // Connection dropped?
}
}})
// Task futures have an error of type `()`; this ensures we handle the
// error, which we do here by logging it.
.map_err(|e| {
error!("connection error = {:?}", e);
});
tokio::spawn(handle_conn);
Ok(())
}})
.map_err(|err| {
debug!("TLS accept error: {:?}", err);
});
tokio::spawn(tls_accept)
}});
tokio::spawn(server);
}
else {
error!("cannot initialize tls");
}
}
/// Activates sprinkler agents based on hostname
pub fn agent<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(sprinklers: I) {
if let Ok(hostname) = sys_info::hostname() {
for i in sprinklers.into_iter().filter(|&i| i.hostname() == hostname) {
i.activate_agent();
info!("sprinkler[{}] activated.", i.id());
}
}
else {
error!("Cannot obtain hostname.");
std::process::exit(-1);
}
}
pub fn loop_forever() -> ! {
loop { std::thread::sleep(std::time::Duration::from_secs(600)); }
}
| {
let next = self.counter;
self.counter += 1;
T::build(SprinklerOptions {
_id: next,
_hostname: hostname,
..self.params.clone()
})
} | identifier_body |
lib.rs | #![allow(unused_imports)]
#[macro_use]
extern crate log;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use bytes::{BufMut, BytesMut};
use tokio::net::TcpStream;
use byteorder::{ByteOrder, BigEndian};
use futures::try_ready;
use futures::future::Either;
use tokio::prelude::*;
use chrono::naive::NaiveDateTime;
mod commcheck;
pub use commcheck::CommCheck;
#[derive(Clone)]
pub struct SprinklerOptions {
pub heart_beat: u64,
pub retry_delay: u64,
pub master_addr: String,
pub _id: usize,
pub _hostname: String,
}
impl Default for SprinklerOptions {
fn default() -> Self {
SprinklerOptions {
heart_beat: 3,
retry_delay: 20,
master_addr: String::from("localhost"),
_id: 0,
_hostname: String::from("localhost")
}
}
}
/// Sprinkler Builder
pub struct SprinklerBuilder {
params: SprinklerOptions,
counter: usize
}
impl SprinklerBuilder {
pub fn new(params: SprinklerOptions) -> Self {
SprinklerBuilder {
params,
counter: 0
}
}
}
impl SprinklerBuilder {
pub fn build<T: Sprinkler>(&mut self, hostname: String) -> T {
let next = self.counter;
self.counter += 1;
T::build(SprinklerOptions {
_id: next,
_hostname: hostname,
..self.params.clone()
})
}
}
#[cfg(feature = "master")]
type EncryptedStream = tokio_tls::TlsStream<TcpStream>;
/// A TCP stream adapter to convert between byte stream and objects
#[cfg(feature = "master")]
#[derive(Debug)]
pub struct SprinklerProto {
socket: EncryptedStream,
read_buffer: BytesMut,
}
#[cfg(feature = "master")]
impl SprinklerProto {
pub fn new(socket: EncryptedStream) -> Self {
SprinklerProto {
socket,
read_buffer: BytesMut::new(),
}
}
/// Update read buffer
fn check(&mut self) -> Poll<(), std::io::Error> {
loop { // Keep reading until the socket would block (NotReady) or has been closed (n == 0)
self.read_buffer.reserve(512);
let n = try_ready!(self.socket.read_buf(&mut self.read_buffer));
if n == 0 {
return Ok(Async::Ready(()));
}
}
}
}
/// Encode a message and place it in a write buffer
pub fn compose_message(from: usize, msg: String) -> BytesMut {
let mut write_buffer = BytesMut::new();
write_buffer.reserve(512);
write_buffer.put_u16_be(from as u16);
write_buffer.put_i64_be(chrono::Local::now().timestamp());
write_buffer.put_u16_be(msg.len() as u16);
write_buffer.put(msg);
write_buffer
}
/// Message header
#[derive(Clone, Debug)]
pub struct SprinklerProtoHeader {
id: u16,
timestamp: i64,
len: u16
}
#[cfg(feature = "master")]
impl Stream for SprinklerProto {
type Item = SprinklerProtoHeader;
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let sock_closed = self.check()?.is_ready();
if self.read_buffer.len() > 12 {
Ok(Async::Ready(Some(SprinklerProtoHeader {
id: BigEndian::read_u16(&self.read_buffer.split_to(2)),
timestamp: BigEndian::read_u64(&self.read_buffer.split_to(8)) as i64,
len: BigEndian::read_u16(&self.read_buffer.split_to(2))
})))
}
else {
if sock_closed { Ok(Async::Ready(None)) }
else { Ok(Async::NotReady) }
}
}
}
#[derive(Clone)]
pub enum Transmitter<T> {
/// Synchronous Sender
Synchronous(std::sync::mpsc::Sender<T>),
/// Asynchronous Sender
Asynchronous(futures::sync::mpsc::Sender<T>)
}
impl<T> Transmitter<T> where T: 'static + Send {
/// Send a message through the underlying Sender
pub fn send(&self, t: T) -> Result<(), ()> {
match self {
Transmitter::Synchronous(sender) => sender.send(t).map_err(|_| ()),
Transmitter::Asynchronous(sender) => {
tokio::spawn({
let sender = sender.clone();
sender.send(t).into_future().map(|_| ()).map_err(|_| ())
});
Ok(())
}
}
}
}
#[derive(Clone)]
pub struct Switch {
pub inner: Arc<Mutex<HashMap<usize, Transmitter<Message>>>>
}
impl Switch {
pub fn new() -> Self {
Switch { inner: Arc::new(Mutex::new(HashMap::new())) }
}
pub fn connect_all<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(&self, sprinklers: I) {
let mut switch_init = self.inner.lock().unwrap();
for i in sprinklers {
match i.activate_master() {
ActivationResult::RealtimeMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Synchronous(monitor)); },
ActivationResult::AsyncMonitor(monitor) => { switch_init.insert(i.id(), Transmitter::Asynchronous(monitor)); }
}
}
}
}
/// Message relay between master threads and TCP sockets connected to remote agents
#[cfg(feature = "master")]
pub struct SprinklerRelay {
pub proto: SprinklerProto,
pub header: SprinklerProtoHeader,
pub switch: Switch
}
#[cfg(feature = "master")]
impl Future for SprinklerRelay {
type Item = ();
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let sock_closed = self.proto.check()?.is_ready();
if self.proto.read_buffer.len() >= self.header.len as usize {
if let Ok(msgbody) = String::from_utf8(self.proto.read_buffer.to_vec()) {
if let Some(tx) = self.switch.inner.lock().unwrap().get(&(self.header.id as usize)) {
if let Err(_) = tx.send(Message{
timestamp: NaiveDateTime::from_timestamp(self.header.timestamp, 0),
body: msgbody
}) {
warn!("Failed to relay the message.");
}
}
Ok(Async::Ready(()))
}
else {
warn!("Failed to decode message.");
Ok(Async::Ready(()))
}
}
else {
if sock_closed {
warn!("Message was lost.");
Ok(Async::Ready(()))
}
else { Ok(Async::NotReady) }
}
}
}
pub enum ActivationResult {
/// A realtime algorithm based master thread that monitors agent threads
RealtimeMonitor(std::sync::mpsc::Sender<Message>),
/// An asynchronous master thread that monitors agent threads
AsyncMonitor(futures::sync::mpsc::Sender<Message>)
}
/// DoS prevention mechanisms, which consist of distributed agent threads monitored by master threads, identifiable by a systemwide id.
/// The agent threads, at a remote location, will independently detect system anomalies and intervene while notifying master threads,
/// so that there will not be a single point of failure.
/// The master threads, gathered at a single reachable networking endpoint, may participate in DoS prevention from a control plane angle or only record system anomalies.
/// The systemwide configuration is done by replicating the same config file and executable.
pub trait Sprinkler {
/// Build a new sprinkler
fn build(options: SprinklerOptions) -> Self where Self: Sized;
/// Get systemwide id
fn id(&self) -> usize;
/// Get the hostname, where the agent would be deployed
fn hostname(&self) -> &str;
/// Start the master thread, returning a sender (to the master thread) on a intraprocess communication channel
fn activate_master(&self) -> ActivationResult;
/// Start the agent thread
fn activate_agent(&self);
/// Kill the master thread. Note: there is no way to reach out and kill any agent threads.
fn deactivate(&self);
}
/// Sprinkler thread level message format
#[derive(Clone)]
pub struct Message {
pub timestamp: NaiveDateTime,
pub body: String
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum Anomaly {
Negative, // No anomaly has been detected
Positive, // Anomaly has occurred
Fixing(usize), // Has attempted to intervene N times
OutOfControl // Has given up trying because the programmed strategy will not work
}
impl Anomaly {
pub fn get_retry_unchecked(&self) -> usize {
match self {
Anomaly::Negative | Anomaly::Positive => 0,
Anomaly::Fixing(n) => *n,
Anomaly::OutOfControl => std::usize::MAX
}
}
pub fn escalate(&self, max_retry: usize) -> AnomalyTransition {
match self {
Anomaly::Negative => (*self >> Anomaly::Positive).unwrap(),
Anomaly::Positive => (*self >> Anomaly::Fixing(1)).unwrap(),
Anomaly::Fixing(n) => if *n < max_retry {
AnomalyTransition::Fixing
} else {
(*self >> Anomaly::OutOfControl).unwrap()
},
Anomaly::OutOfControl => (*self >> Anomaly::OutOfControl).unwrap(),
}
}
pub fn | (&self) -> AnomalyTransition {
(*self >> Anomaly::Negative).unwrap()
}
}
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum AnomalyTransition {
Normal, // Negative -> Negative
Occurred, // Negative -> Positive
Unhandled, // Positive -> Positive
Disappeared, // Positive | OutOfControl -> Negative
Fixed, // Fixing(_) -> Negative
Fixing, // Positive -> Fixing(1) || Fixing(n) -> Fixing(n+1)
GaveUp, // Fixing(m) -> OutOfControl
HasGivenUp // OutOfControl -> OutOfControl
}
use std::ops::Shr;
use std::ops::ShrAssign;
impl Shr for Anomaly {
type Output = Option<AnomalyTransition>;
fn shr(self, rhs: Self) -> Option<AnomalyTransition> {
match (self, rhs) {
(Anomaly::Negative, Anomaly::Negative) => Some(AnomalyTransition::Normal),
(Anomaly::Negative, Anomaly::Positive) => Some(AnomalyTransition::Occurred),
(Anomaly::Positive, Anomaly::Positive) => Some(AnomalyTransition::Unhandled),
(Anomaly::Positive, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::Positive, Anomaly::Fixing(1)) => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(i), Anomaly::Fixing(j)) if i+1==j => Some(AnomalyTransition::Fixing),
(Anomaly::Fixing(_), Anomaly::Negative) => Some(AnomalyTransition::Fixed),
(Anomaly::Fixing(_), Anomaly::OutOfControl) => Some(AnomalyTransition::GaveUp),
(Anomaly::OutOfControl, Anomaly::Negative) => Some(AnomalyTransition::Disappeared),
(Anomaly::OutOfControl, Anomaly::OutOfControl) => Some(AnomalyTransition::HasGivenUp),
_ => None
}
}
}
impl Shr<AnomalyTransition> for Anomaly {
type Output = Anomaly;
fn shr(self, rhs: AnomalyTransition) -> Anomaly {
match (self, rhs) {
(Anomaly::Negative, AnomalyTransition::Occurred) => Anomaly::Positive,
(Anomaly::Positive, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::OutOfControl, AnomalyTransition::Disappeared) => Anomaly::Negative,
(Anomaly::Fixing(_), AnomalyTransition::Fixed) => Anomaly::Negative,
(Anomaly::Positive, AnomalyTransition::Fixing) => Anomaly::Fixing(1),
(Anomaly::Fixing(n), AnomalyTransition::Fixing) => Anomaly::Fixing(n+1),
(Anomaly::Fixing(_), AnomalyTransition::GaveUp) => Anomaly::OutOfControl,
_ => self
}
}
}
impl ShrAssign<AnomalyTransition> for Anomaly {
fn shr_assign(&mut self, rhs: AnomalyTransition) {
let next = *self >> rhs;
*self = next;
}
}
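// Illustrative only: a quick walk through the state machine above. `escalate`
// picks the next transition and `>>=` applies it; with max_retry = 3 the state
// runs Negative -> Positive -> Fixing(1..=3) -> OutOfControl and then stays put.
fn _example_anomaly_walk() {
    let mut state = Anomaly::Negative;
    for _ in 0..6 {
        let transition = state.escalate(3);
        state >>= transition;
    }
    assert_eq!(state, Anomaly::OutOfControl);
}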
/// Create a TLS acceptor
#[cfg(feature = "master")]
fn init_tls() -> native_tls::Result<tokio_tls::TlsAcceptor> {
let der = include_bytes!("/etc/sprinkler.conf.d/master.p12");
    // TODO: key loading is hard-coded.
let mut keybuffer = Vec::new();
std::fs::File::open("/root/.sprinkler.key").expect("cannot read key").read_to_end(&mut keybuffer).expect("cannot read key");
let cert = native_tls::Identity::from_pkcs12(der, &String::from_utf8_lossy(&keybuffer))?;
Ok(tokio_tls::TlsAcceptor::from(native_tls::TlsAcceptor::builder(cert).build()?))
}
/// Starts a tokio server bound to the specified address
#[cfg(feature = "master")]
pub fn server(addr: &std::net::SocketAddr, switch: &Switch) {
/*
Self-signed cert
openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out sprinkler.crt -keyout sprinkler.key
openssl pkcs12 -export -out identity.p12 -inkey sprinkler.key -in sprinkler.crt
echo "$KEY_PASSWORD" | tr -d '\n' > identity.txt
chown root:root identity.txt
chmod 600 identity.txt
*/
if let Ok(tls_acceptor) = init_tls() {
let listener = tokio::net::TcpListener::bind(addr).expect("unable to bind TCP listener");
let server = listener.incoming()
.map_err(|e| eprintln!("accept failed = {:?}", e))
.for_each({ let switch = switch.clone(); move |s| {
let tls_accept = tls_acceptor
.accept(s)
.and_then({ let switch = switch.clone(); move |s| {
let proto = SprinklerProto::new(s);
let handle_conn = proto.into_future()
.map_err(|(e, _)| e)
.and_then({ let switch = switch.clone(); move |(header, proto)| {
match header {
Some(header) => Either::A(SprinklerRelay{ proto, header, switch }),
None => Either::B(future::ok(())) // Connection dropped?
}
}})
// Task futures have an error of type `()`, this ensures we handle the
// error. We do this by printing the error to STDOUT.
.map_err(|e| {
error!("connection error = {:?}", e);
});
tokio::spawn(handle_conn);
Ok(())
}})
.map_err(|err| {
debug!("TLS accept error: {:?}", err);
});
tokio::spawn(tls_accept)
}});
tokio::spawn(server);
}
else {
error!("cannot initialize tls");
}
}
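// Illustrative only: how a master binary might bring the listener up. The bind
// address and the tokio 0.1 / futures 0.1 bootstrapping shown here are
// assumptions, not code taken from this crate.
#[cfg(feature = "master")]
fn _example_run_master(switch: Switch) {
    let addr: std::net::SocketAddr = "0.0.0.0:3777".parse().expect("bad listen address");
    tokio::run(futures::future::lazy(move || {
        server(&addr, &switch);
        Ok::<(), ()>(())
    }));
}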
/// Activates sprinkler agents based on hostname
pub fn agent<'a, I: IntoIterator<Item=&'a Box<dyn Sprinkler>> + Copy>(sprinklers: I) {
if let Ok(hostname) = sys_info::hostname() {
for i in sprinklers.into_iter().filter(|&i| i.hostname() == hostname) {
i.activate_agent();
info!("sprinkler[{}] activated.", i.id());
}
}
else {
error!("Cannot obtain hostname.");
std::process::exit(-1);
}
}
pub fn loop_forever() -> ! {
loop { std::thread::sleep(std::time::Duration::from_secs(600)); }
}
| diminish | identifier_name |
lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use anyhow::{anyhow, bail, Result};
use crossbeam::channel::Sender;
use glob::glob;
use log::{info, warn};
use scan_fmt::scan_fmt;
use simplelog as sl;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::fmt::Write as FmtWrite;
use std::fs;
use std::io::prelude::*;
use std::io::BufReader;
use std::mem::size_of;
use std::os::linux::fs::MetadataExt as LinuxME;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::MetadataExt as UnixME;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process::{self, Command};
use std::sync::{atomic, Condvar, Mutex};
use std::thread_local;
use std::time::{Duration, UNIX_EPOCH};
use sysinfo::{self, SystemExt};
pub mod iocost;
pub mod journal_tailer;
pub mod json_file;
pub mod storage_info;
pub mod systemd;
pub use iocost::{IoCostModelParams, IoCostQoSParams, IoCostSysSave};
pub use journal_tailer::*;
pub use json_file::{
JsonArgs, JsonArgsHelper, JsonConfigFile, JsonLoad, JsonRawFile, JsonReportFile, JsonSave,
};
pub use storage_info::*;
pub use systemd::TransientService;
pub const TO_MSEC: f64 = 1000.0;
pub const TO_PCT: f64 = 100.0;
pub const MSEC: f64 = 1.0 / 1000.0;
lazy_static::lazy_static! {
pub static ref TOTAL_SYSTEM_MEMORY: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_memory() as usize * 1024
};
pub static ref TOTAL_SYSTEM_SWAP: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_swap() as usize * 1024
};
pub static ref NR_SYSTEM_CPUS: usize = ::num_cpus::get();
static ref TOTAL_MEMORY: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref TOTAL_SWAP: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref NR_CPUS: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
pub static ref PAGE_SIZE: usize = ::page_size::get();
pub static ref ROTATIONAL_SWAP: bool = storage_info::is_swap_rotational();
pub static ref IS_FB_PROD: bool = {
match glob("/sys/fs/cgroup/**/fbagentd.service")
.unwrap()
.filter_map(|x| x.ok())
.next()
{
Some(_) => {
warn!("FB PROD detected, default parameters will be adjusted");
true
}
None => false,
}
};
}
pub fn total_memory() -> usize {
match TOTAL_MEMORY.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_MEMORY,
v => v,
}
}
pub fn total_swap() -> usize {
match TOTAL_SWAP.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_SWAP,
v => v,
}
}
pub fn nr_cpus() -> usize {
match NR_CPUS.load(atomic::Ordering::Relaxed) {
0 => *NR_SYSTEM_CPUS,
v => v,
}
}
pub fn override_system_configuration(
total_memory: Option<usize>,
total_swap: Option<usize>,
nr_cpus: Option<usize>,
) {
let total_memory = total_memory.unwrap_or(0);
let total_swap = total_swap.unwrap_or(0);
let nr_cpus = nr_cpus.unwrap_or(0);
TOTAL_MEMORY.store(total_memory, atomic::Ordering::Relaxed);
TOTAL_SWAP.store(total_swap, atomic::Ordering::Relaxed);
NR_CPUS.store(nr_cpus, atomic::Ordering::Relaxed);
let mut buf = String::new();
if total_memory > 0 {
write!(
buf,
" memory={}->{}",
format_size(*TOTAL_SYSTEM_MEMORY),
format_size(total_memory)
)
.unwrap();
}
if total_swap > 0 {
write!(
buf,
" swap={}->{}",
format_size(*TOTAL_SYSTEM_SWAP),
format_size(total_swap)
)
.unwrap();
}
if nr_cpus > 0 {
write!(buf, " cpus={}->{}", *NR_SYSTEM_CPUS, nr_cpus).unwrap();
}
if buf.len() > 0 {
info!("System configuration overrides:{}", &buf);
}
}
pub fn to_gb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 30) as f64
}
pub fn to_mb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 20) as f64
}
pub fn to_kb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 10) as f64
}
pub fn scale_ratio<T>(ratio: f64, (left, mid, right): (T, T, T)) -> T
where
T: PartialOrd + num::FromPrimitive + num::ToPrimitive,
{
let (left_f64, mid_f64, right_f64) = (
left.to_f64().unwrap(),
mid.to_f64().unwrap(),
right.to_f64().unwrap(),
);
let v = if ratio < 0.5 {
left_f64 + (mid_f64 - left_f64) * ratio / 0.5
} else {
mid_f64 + (right_f64 - mid_f64) * (ratio - 0.5) / 0.5
};
num::clamp(T::from_f64(v).unwrap(), left, right)
}
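// Illustrative only: scale_ratio interpolates linearly on [left, mid] for ratios
// below 0.5 and on [mid, right] above it, so a quarter of the way in lands
// halfway between left and mid.
fn _example_scale_ratio() {
    assert_eq!(scale_ratio(0.25, (0u64, 10, 100)), 5);
    assert_eq!(scale_ratio(0.75, (0u64, 10, 100)), 55);
}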
fn format_size_internal<T>(size: T, zero: &str) -> String
where
T: num::ToPrimitive,
{
let format_size_helper = |size: u64, shift: u32, suffix: &str| -> Option<String> {
let unit: u64 = 1 << shift;
if size < unit {
Some(zero.to_string())
} else if size < 100 * unit {
Some(format!("{:.1}{}", size as f64 / unit as f64, suffix))
} else if size < 1024 * unit {
Some(format!("{:}{}", size / unit, suffix))
} else {
None
}
};
let size = size.to_u64().unwrap();
format_size_helper(size, 0, "B")
.or_else(|| format_size_helper(size, 10, "K"))
.or_else(|| format_size_helper(size, 20, "M"))
.or_else(|| format_size_helper(size, 30, "G"))
.or_else(|| format_size_helper(size, 40, "P"))
.or_else(|| format_size_helper(size, 50, "E"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_size<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "0")
}
pub fn format_size_dashed<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "-")
}
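// Illustrative only: the size formatters above keep at most three significant
// digits and pick a binary-prefix suffix.
fn _example_format_size() {
    assert_eq!(format_size(0u64), "0");
    assert_eq!(format_size(1536u64), "1.5K");
    assert_eq!(format_size_dashed(0u64), "-");
}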
fn format_duration_internal(dur: f64, zero: &str) -> String {
let format_nsecs_helper = |nsecs: u64, unit: u64, max: u64, suffix: &str| -> Option<String> {
if nsecs < unit {
Some(zero.to_string())
} else if nsecs < 100 * unit {
Some(format!("{:.1}{}", nsecs as f64 / unit as f64, suffix))
} else if nsecs < max * unit {
Some(format!("{:}{}", nsecs / unit, suffix))
} else {
None
}
};
let nsecs = (dur * 1_000_000_000.0).round() as u64;
format_nsecs_helper(nsecs, 10_u64.pow(0), 1000, "n")
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(3), 1000, "u"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(6), 1000, "m"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9), 60, "s"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60, 60, "M"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60, 24, "H"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24, 365, "D"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24 * 365, 1000, "Y"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_duration(dur: f64) -> String {
format_duration_internal(dur, "0")
}
pub fn format_duration_dashed(dur: f64) -> String {
format_duration_internal(dur, "-")
}
fn format_pct_internal(ratio: f64, zero: &str) -> String {
if ratio == 0.0 {
zero.to_string()
} else if ratio > 0.99 && ratio <= 9.99 {
format!("{:3.0}", ratio * 100.0)
} else if ratio > 9.99 {
"INF".into()
} else {
format!("{:.01}", ratio * 100.0)
}
}
pub fn format_pct(ratio: f64) -> String {
format_pct_internal(ratio, "0")
}
pub fn format_pct_dashed(ratio: f64) -> String {
format_pct_internal(ratio, "-")
}
pub fn parse_duration(input: &str) -> Result<f64> {
lazy_static::lazy_static! {
static ref UNITS: HashMap<char, f64> = [
('n', 0.000_000_001),
('u', 0.000_001),
('m', 0.001),
('s', 1.0),
('M', 60.0),
('H', 3600.0),
('D', 3600.0 * 24.0),
('Y', 3600.0 * 24.0 * 365.0),
]
.iter()
.cloned()
.collect();
}
let mut num = String::new();
let mut sum = 0.0;
for ch in input.chars() {
if UNITS.contains_key(&ch) {
sum += num.trim().parse::<f64>()? * UNITS[&ch];
num.clear();
} else {
num.push(ch);
}
}
if num.trim().len() > 0 {
sum += num.trim().parse::<f64>()?;
}
Ok(sum)
}
fn is_executable<P: AsRef<Path>>(path_in: P) -> bool {
let path = path_in.as_ref();
match path.metadata() {
Ok(md) => md.is_file() && md.mode() & 0o111 != 0,
Err(_) => false,
}
}
pub fn exe_dir() -> Result<PathBuf> {
let mut path = env::current_exe()?;
path.pop();
Ok(path)
}
pub fn find_bin<N: AsRef<OsStr>, P: AsRef<OsStr>>(
name_in: N,
prepend_in: Option<P>,
) -> Option<PathBuf> {
let name = name_in.as_ref();
let mut search = OsString::new();
if let Some(prepend) = prepend_in.as_ref() {
search.push(prepend);
search.push(":");
}
if let Some(dirs) = env::var_os("PATH") {
search.push(dirs);
}
for dir in env::split_paths(&search) { | let mut path = dir.to_owned();
path.push(name);
if let Ok(path) = path.canonicalize() {
if is_executable(&path) {
return Some(path);
}
}
}
None
}
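// Illustrative only: locate an executable on PATH, preferring a directory next
// to the current binary. The "fio" name is a placeholder, not a dependency of
// this crate.
fn _example_find_bin() -> Option<PathBuf> {
    let prepend = exe_dir().ok();
    find_bin("fio", prepend.as_deref())
}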
pub fn chgrp<P: AsRef<Path>>(path_in: P, gid: u32) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
if md.st_gid() != gid {
let cpath = CString::new(path.as_os_str().as_bytes())?;
if unsafe { libc::chown(cpath.as_ptr(), md.st_uid(), gid) } < 0 {
bail!("Failed to chgrp {:?} to {} ({:?})", path, gid, unsafe {
*libc::__errno_location()
});
}
Ok(true)
} else {
Ok(false)
}
}
pub fn set_sgid<P: AsRef<Path>>(path_in: P) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
let mut perm = md.permissions();
if perm.mode() & 0o2000 == 0 {
perm.set_mode(perm.mode() | 0o2000);
fs::set_permissions(path, perm)?;
Ok(true)
} else {
Ok(false)
}
}
pub fn read_one_line<P: AsRef<Path>>(path: P) -> Result<String> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
Ok(r.lines().next().ok_or(anyhow!("File empty"))??)
}
pub fn write_one_line<P: AsRef<Path>>(path: P, line: &str) -> Result<()> {
let mut f = fs::OpenOptions::new().write(true).open(path)?;
Ok(f.write_all(line.as_ref())?)
}
pub fn unix_now() -> u64 {
UNIX_EPOCH.elapsed().unwrap().as_secs()
}
pub fn init_logging(verbosity: u32) {
if std::env::var("RUST_LOG").is_ok() {
env_logger::init();
} else {
let sl_level = match verbosity {
0 | 1 => sl::LevelFilter::Info,
2 => sl::LevelFilter::Debug,
_ => sl::LevelFilter::Trace,
};
let mut lcfg = sl::ConfigBuilder::new();
lcfg.set_time_level(sl::LevelFilter::Off)
.set_location_level(sl::LevelFilter::Off)
.set_target_level(sl::LevelFilter::Off)
.set_thread_level(sl::LevelFilter::Off);
if !console::user_attended_stderr()
|| sl::TermLogger::init(sl_level, lcfg.build(), sl::TerminalMode::Stderr).is_err()
{
sl::SimpleLogger::init(sl_level, lcfg.build()).unwrap();
}
}
}
pub fn child_reader_thread(name: String, stdout: process::ChildStdout, tx: Sender<String>) {
let reader = BufReader::new(stdout);
for line in reader.lines() {
match line {
Ok(line) => {
if let Err(e) = tx.send(line) {
info!("{}: Reader thread terminating ({:?})", &name, &e);
break;
}
}
Err(e) => {
warn!("{}: Failed to read from journalctl ({:?})", &name, &e);
break;
}
}
}
}
pub fn run_command(cmd: &mut Command, emsg: &str) -> Result<()> {
let cmd_str = format!("{:?}", &cmd);
match cmd.status() {
Ok(rc) if rc.success() => Ok(()),
Ok(rc) => bail!("{:?} ({:?}): {}", &cmd_str, &rc, emsg,),
Err(e) => bail!("{:?} ({:?}): {}", &cmd_str, &e, emsg,),
}
}
pub fn fill_area_with_random<T, R: rand::Rng + ?Sized>(area: &mut [T], comp: f64, rng: &mut R) {
let area = unsafe {
std::slice::from_raw_parts_mut(
std::mem::transmute::<*mut T, *mut u64>(area.as_mut_ptr()),
area.len() * size_of::<T>() / size_of::<u64>(),
)
};
const BLOCK_SIZE: usize = 512;
const WORDS_PER_BLOCK: usize = BLOCK_SIZE / size_of::<u64>();
let rands_per_block = (((WORDS_PER_BLOCK as f64) * (1.0 - comp)) as usize).min(WORDS_PER_BLOCK);
let last_first = area[0];
for i in 0..area.len() {
area[i] = if i % WORDS_PER_BLOCK < rands_per_block {
rng.gen()
} else {
0
};
}
// guarantee that the first word doesn't stay the same
if area[0] == last_first {
area[0] += 1;
}
}
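// Illustrative only: fill one 4KB page worth of u64 words at roughly 50%
// compressibility. The page size and the use of rand::thread_rng() are
// assumptions made for the example.
fn _example_fill_random() {
    let mut words = vec![0u64; 4096 / size_of::<u64>()];
    fill_area_with_random(&mut words, 0.5, &mut rand::thread_rng());
}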
pub fn read_cgroup_flat_keyed_file(path: &str) -> Result<HashMap<String, u64>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
if let Ok((key, val)) = scan_fmt!(&line, "{} {d}", String, u64) {
map.insert(key, val);
}
}
Ok(map)
}
pub fn read_cgroup_nested_keyed_file(
path: &str,
) -> Result<HashMap<String, HashMap<String, String>>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut top_map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
let mut split = line.split_whitespace();
let top_key = split.next().unwrap();
let mut map = HashMap::new();
for tok in split {
if let Ok((key, val)) = scan_fmt!(tok, "{}={}", String, String) {
map.insert(key, val);
}
}
top_map.insert(top_key.into(), map);
}
Ok(top_map)
}
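// Illustrative only: read a cgroup2 flat keyed file and pull one counter out of
// it. The path and the "anon" key are assumptions; errors simply propagate.
fn _example_read_memory_stat() -> Result<u64> {
    let stat = read_cgroup_flat_keyed_file("/sys/fs/cgroup/system.slice/memory.stat")?;
    Ok(stat.get("anon").copied().unwrap_or(0))
}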
struct GlobalProgState {
exiting: bool,
kick_seq: u64,
}
lazy_static::lazy_static! {
static ref PROG_STATE: Mutex<GlobalProgState> = Mutex::new(GlobalProgState {
exiting: false,
kick_seq: 1
});
static ref PROG_WAITQ: Condvar = Condvar::new();
}
thread_local! {
static LOCAL_KICK_SEQ: RefCell<u64> = RefCell::new(0);
}
pub fn setup_prog_state() {
ctrlc::set_handler(move || {
info!("SIGINT/TERM received, exiting...");
set_prog_exiting();
})
.expect("Error setting term handler");
}
pub fn set_prog_exiting() {
PROG_STATE.lock().unwrap().exiting = true;
PROG_WAITQ.notify_all();
}
pub fn prog_exiting() -> bool {
PROG_STATE.lock().unwrap().exiting
}
pub fn prog_kick() {
PROG_STATE.lock().unwrap().kick_seq += 1;
PROG_WAITQ.notify_all();
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProgState {
Running,
Exiting,
Kicked,
}
pub fn wait_prog_state(dur: Duration) -> ProgState {
let mut first = true;
let mut state = PROG_STATE.lock().unwrap();
loop {
if state.exiting {
return ProgState::Exiting;
}
if LOCAL_KICK_SEQ.with(|seq| {
if *seq.borrow() < state.kick_seq {
*seq.borrow_mut() = state.kick_seq;
true
} else {
false
}
}) {
return ProgState::Kicked;
}
if first {
state = PROG_WAITQ.wait_timeout(state, dur).unwrap().0;
first = false;
} else {
return ProgState::Running;
}
}
}
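// Illustrative only: the polling loop a daemon built on these helpers might run.
// The one-second period and the log message are assumptions.
fn _example_main_loop() {
    setup_prog_state();
    loop {
        match wait_prog_state(Duration::from_secs(1)) {
            ProgState::Exiting => break,
            ProgState::Kicked => info!("kicked, refreshing state early"),
            ProgState::Running => { /* do one round of periodic work */ }
        }
    }
}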
#[cfg(test)]
mod tests {
#[test]
fn test_format_duration() {
for pair in &[
(0.000003932, "3.9u"),
(0.00448, "4.5m"),
(0.3, "300m"),
(2042.0, "34.0M"),
(3456000.0, "40.0D"),
(60480000.0, "1.9Y"),
] {
let result = super::format_duration(pair.0);
assert_eq!(&result, pair.1);
println!("{} -> {} ({})", pair.0, &result, pair.1);
}
}
#[test]
fn test_parse_duration() {
for pair in &[
(0.0000039, "3.9u"),
(0.0044, "4.4m"),
(0.3, "300m"),
(2040.0, "34.0M"),
(3456000.0, "40.0D"),
(59918400.0, "1.9Y"),
(59918401.1, "1.9Y1s100m"),
(59918401.1, "1.9Y1.1s"),
(59918401.102, "1.9Y 1.1s 2000 u"),
(1.27, "1.27"),
(1.37, "100m1.27"),
] {
let result = super::parse_duration(pair.1).unwrap();
assert_eq!(pair.0, result);
println!("{} -> {} ({})", pair.1, result, pair.0);
}
}
} | random_line_split |
|
lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use anyhow::{anyhow, bail, Result};
use crossbeam::channel::Sender;
use glob::glob;
use log::{info, warn};
use scan_fmt::scan_fmt;
use simplelog as sl;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::fmt::Write as FmtWrite;
use std::fs;
use std::io::prelude::*;
use std::io::BufReader;
use std::mem::size_of;
use std::os::linux::fs::MetadataExt as LinuxME;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::MetadataExt as UnixME;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process::{self, Command};
use std::sync::{atomic, Condvar, Mutex};
use std::thread_local;
use std::time::{Duration, UNIX_EPOCH};
use sysinfo::{self, SystemExt};
pub mod iocost;
pub mod journal_tailer;
pub mod json_file;
pub mod storage_info;
pub mod systemd;
pub use iocost::{IoCostModelParams, IoCostQoSParams, IoCostSysSave};
pub use journal_tailer::*;
pub use json_file::{
JsonArgs, JsonArgsHelper, JsonConfigFile, JsonLoad, JsonRawFile, JsonReportFile, JsonSave,
};
pub use storage_info::*;
pub use systemd::TransientService;
pub const TO_MSEC: f64 = 1000.0;
pub const TO_PCT: f64 = 100.0;
pub const MSEC: f64 = 1.0 / 1000.0;
lazy_static::lazy_static! {
pub static ref TOTAL_SYSTEM_MEMORY: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_memory() as usize * 1024
};
pub static ref TOTAL_SYSTEM_SWAP: usize = {
let mut sys = sysinfo::System::new();
sys.refresh_memory();
sys.get_total_swap() as usize * 1024
};
pub static ref NR_SYSTEM_CPUS: usize = ::num_cpus::get();
static ref TOTAL_MEMORY: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref TOTAL_SWAP: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
static ref NR_CPUS: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
pub static ref PAGE_SIZE: usize = ::page_size::get();
pub static ref ROTATIONAL_SWAP: bool = storage_info::is_swap_rotational();
pub static ref IS_FB_PROD: bool = {
match glob("/sys/fs/cgroup/**/fbagentd.service")
.unwrap()
.filter_map(|x| x.ok())
.next()
{
Some(_) => {
warn!("FB PROD detected, default parameters will be adjusted");
true
}
None => false,
}
};
}
pub fn total_memory() -> usize {
match TOTAL_MEMORY.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_MEMORY,
v => v,
}
}
pub fn total_swap() -> usize {
match TOTAL_SWAP.load(atomic::Ordering::Relaxed) {
0 => *TOTAL_SYSTEM_SWAP,
v => v,
}
}
pub fn nr_cpus() -> usize {
match NR_CPUS.load(atomic::Ordering::Relaxed) {
0 => *NR_SYSTEM_CPUS,
v => v,
}
}
pub fn override_system_configuration(
total_memory: Option<usize>,
total_swap: Option<usize>,
nr_cpus: Option<usize>,
) {
let total_memory = total_memory.unwrap_or(0);
let total_swap = total_swap.unwrap_or(0);
let nr_cpus = nr_cpus.unwrap_or(0);
TOTAL_MEMORY.store(total_memory, atomic::Ordering::Relaxed);
TOTAL_SWAP.store(total_swap, atomic::Ordering::Relaxed);
NR_CPUS.store(nr_cpus, atomic::Ordering::Relaxed);
let mut buf = String::new();
if total_memory > 0 {
write!(
buf,
" memory={}->{}",
format_size(*TOTAL_SYSTEM_MEMORY),
format_size(total_memory)
)
.unwrap();
}
if total_swap > 0 {
write!(
buf,
" swap={}->{}",
format_size(*TOTAL_SYSTEM_SWAP),
format_size(total_swap)
)
.unwrap();
}
if nr_cpus > 0 {
write!(buf, " cpus={}->{}", *NR_SYSTEM_CPUS, nr_cpus).unwrap();
}
if buf.len() > 0 {
info!("System configuration overrides:{}", &buf);
}
}
pub fn to_gb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 30) as f64
}
pub fn to_mb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 20) as f64
}
pub fn to_kb<T>(size: T) -> f64
where
T: num::ToPrimitive,
{
let size_f64 = size.to_f64().unwrap();
size_f64 / (1 << 10) as f64
}
pub fn scale_ratio<T>(ratio: f64, (left, mid, right): (T, T, T)) -> T
where
T: PartialOrd + num::FromPrimitive + num::ToPrimitive,
{
let (left_f64, mid_f64, right_f64) = (
left.to_f64().unwrap(),
mid.to_f64().unwrap(),
right.to_f64().unwrap(),
);
let v = if ratio < 0.5 {
left_f64 + (mid_f64 - left_f64) * ratio / 0.5
} else {
mid_f64 + (right_f64 - mid_f64) * (ratio - 0.5) / 0.5
};
num::clamp(T::from_f64(v).unwrap(), left, right)
}
fn format_size_internal<T>(size: T, zero: &str) -> String
where
T: num::ToPrimitive,
{
let format_size_helper = |size: u64, shift: u32, suffix: &str| -> Option<String> {
let unit: u64 = 1 << shift;
if size < unit {
Some(zero.to_string())
} else if size < 100 * unit {
Some(format!("{:.1}{}", size as f64 / unit as f64, suffix))
} else if size < 1024 * unit {
Some(format!("{:}{}", size / unit, suffix))
} else {
None
}
};
let size = size.to_u64().unwrap();
format_size_helper(size, 0, "B")
.or_else(|| format_size_helper(size, 10, "K"))
.or_else(|| format_size_helper(size, 20, "M"))
.or_else(|| format_size_helper(size, 30, "G"))
.or_else(|| format_size_helper(size, 40, "P"))
.or_else(|| format_size_helper(size, 50, "E"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_size<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "0")
}
pub fn format_size_dashed<T>(size: T) -> String
where
T: num::ToPrimitive,
{
format_size_internal(size, "-")
}
fn format_duration_internal(dur: f64, zero: &str) -> String {
let format_nsecs_helper = |nsecs: u64, unit: u64, max: u64, suffix: &str| -> Option<String> {
if nsecs < unit {
Some(zero.to_string())
} else if nsecs < 100 * unit {
Some(format!("{:.1}{}", nsecs as f64 / unit as f64, suffix))
} else if nsecs < max * unit {
Some(format!("{:}{}", nsecs / unit, suffix))
} else {
None
}
};
let nsecs = (dur * 1_000_000_000.0).round() as u64;
format_nsecs_helper(nsecs, 10_u64.pow(0), 1000, "n")
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(3), 1000, "u"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(6), 1000, "m"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9), 60, "s"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60, 60, "M"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60, 24, "H"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24, 365, "D"))
.or_else(|| format_nsecs_helper(nsecs, 10_u64.pow(9) * 60 * 60 * 24 * 365, 1000, "Y"))
.unwrap_or_else(|| "INF".into())
}
pub fn format_duration(dur: f64) -> String {
format_duration_internal(dur, "0")
}
pub fn format_duration_dashed(dur: f64) -> String {
format_duration_internal(dur, "-")
}
fn format_pct_internal(ratio: f64, zero: &str) -> String {
if ratio == 0.0 {
zero.to_string()
} else if ratio > 0.99 && ratio <= 9.99 {
format!("{:3.0}", ratio * 100.0)
} else if ratio > 9.99 {
"INF".into()
} else {
format!("{:.01}", ratio * 100.0)
}
}
pub fn format_pct(ratio: f64) -> String {
format_pct_internal(ratio, "0")
}
pub fn format_pct_dashed(ratio: f64) -> String {
format_pct_internal(ratio, "-")
}
pub fn parse_duration(input: &str) -> Result<f64> {
lazy_static::lazy_static! {
static ref UNITS: HashMap<char, f64> = [
('n', 0.000_000_001),
('u', 0.000_001),
('m', 0.001),
('s', 1.0),
('M', 60.0),
('H', 3600.0),
('D', 3600.0 * 24.0),
('Y', 3600.0 * 24.0 * 365.0),
]
.iter()
.cloned()
.collect();
}
let mut num = String::new();
let mut sum = 0.0;
for ch in input.chars() {
if UNITS.contains_key(&ch) {
sum += num.trim().parse::<f64>()? * UNITS[&ch];
num.clear();
} else {
num.push(ch);
}
}
if num.trim().len() > 0 {
sum += num.trim().parse::<f64>()?;
}
Ok(sum)
}
fn is_executable<P: AsRef<Path>>(path_in: P) -> bool {
let path = path_in.as_ref();
match path.metadata() {
Ok(md) => md.is_file() && md.mode() & 0o111 != 0,
Err(_) => false,
}
}
pub fn exe_dir() -> Result<PathBuf> {
let mut path = env::current_exe()?;
path.pop();
Ok(path)
}
pub fn find_bin<N: AsRef<OsStr>, P: AsRef<OsStr>>(
name_in: N,
prepend_in: Option<P>,
) -> Option<PathBuf> {
let name = name_in.as_ref();
let mut search = OsString::new();
if let Some(prepend) = prepend_in.as_ref() {
search.push(prepend);
search.push(":");
}
if let Some(dirs) = env::var_os("PATH") {
search.push(dirs);
}
for dir in env::split_paths(&search) {
let mut path = dir.to_owned();
path.push(name);
if let Ok(path) = path.canonicalize() {
if is_executable(&path) {
return Some(path);
}
}
}
None
}
pub fn chgrp<P: AsRef<Path>>(path_in: P, gid: u32) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
if md.st_gid() != gid {
let cpath = CString::new(path.as_os_str().as_bytes())?;
if unsafe { libc::chown(cpath.as_ptr(), md.st_uid(), gid) } < 0 {
bail!("Failed to chgrp {:?} to {} ({:?})", path, gid, unsafe {
*libc::__errno_location()
});
}
Ok(true)
} else {
Ok(false)
}
}
pub fn set_sgid<P: AsRef<Path>>(path_in: P) -> Result<bool> {
let path = path_in.as_ref();
let md = fs::metadata(path)?;
let mut perm = md.permissions();
if perm.mode() & 0o2000 == 0 {
perm.set_mode(perm.mode() | 0o2000);
fs::set_permissions(path, perm)?;
Ok(true)
} else {
Ok(false)
}
}
pub fn read_one_line<P: AsRef<Path>>(path: P) -> Result<String> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
Ok(r.lines().next().ok_or(anyhow!("File empty"))??)
}
pub fn write_one_line<P: AsRef<Path>>(path: P, line: &str) -> Result<()> {
let mut f = fs::OpenOptions::new().write(true).open(path)?;
Ok(f.write_all(line.as_ref())?)
}
pub fn unix_now() -> u64 {
UNIX_EPOCH.elapsed().unwrap().as_secs()
}
pub fn init_logging(verbosity: u32) {
if std::env::var("RUST_LOG").is_ok() {
env_logger::init();
} else {
let sl_level = match verbosity {
0 | 1 => sl::LevelFilter::Info,
2 => sl::LevelFilter::Debug,
_ => sl::LevelFilter::Trace,
};
let mut lcfg = sl::ConfigBuilder::new();
lcfg.set_time_level(sl::LevelFilter::Off)
.set_location_level(sl::LevelFilter::Off)
.set_target_level(sl::LevelFilter::Off)
.set_thread_level(sl::LevelFilter::Off);
if !console::user_attended_stderr()
|| sl::TermLogger::init(sl_level, lcfg.build(), sl::TerminalMode::Stderr).is_err()
{
sl::SimpleLogger::init(sl_level, lcfg.build()).unwrap();
}
}
}
pub fn child_reader_thread(name: String, stdout: process::ChildStdout, tx: Sender<String>) {
let reader = BufReader::new(stdout);
for line in reader.lines() {
match line {
Ok(line) => {
if let Err(e) = tx.send(line) {
info!("{}: Reader thread terminating ({:?})", &name, &e);
break;
}
}
Err(e) => {
warn!("{}: Failed to read from journalctl ({:?})", &name, &e);
break;
}
}
}
}
pub fn run_command(cmd: &mut Command, emsg: &str) -> Result<()> {
let cmd_str = format!("{:?}", &cmd);
match cmd.status() {
Ok(rc) if rc.success() => Ok(()),
Ok(rc) => bail!("{:?} ({:?}): {}", &cmd_str, &rc, emsg,),
Err(e) => bail!("{:?} ({:?}): {}", &cmd_str, &e, emsg,),
}
}
pub fn fill_area_with_random<T, R: rand::Rng + ?Sized>(area: &mut [T], comp: f64, rng: &mut R) {
let area = unsafe {
std::slice::from_raw_parts_mut(
std::mem::transmute::<*mut T, *mut u64>(area.as_mut_ptr()),
area.len() * size_of::<T>() / size_of::<u64>(),
)
};
const BLOCK_SIZE: usize = 512;
const WORDS_PER_BLOCK: usize = BLOCK_SIZE / size_of::<u64>();
let rands_per_block = (((WORDS_PER_BLOCK as f64) * (1.0 - comp)) as usize).min(WORDS_PER_BLOCK);
let last_first = area[0];
for i in 0..area.len() {
area[i] = if i % WORDS_PER_BLOCK < rands_per_block {
rng.gen()
} else {
0
};
}
// guarantee that the first word doesn't stay the same
if area[0] == last_first {
area[0] += 1;
}
}
pub fn read_cgroup_flat_keyed_file(path: &str) -> Result<HashMap<String, u64>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
if let Ok((key, val)) = scan_fmt!(&line, "{} {d}", String, u64) {
map.insert(key, val);
}
}
Ok(map)
}
pub fn read_cgroup_nested_keyed_file(
path: &str,
) -> Result<HashMap<String, HashMap<String, String>>> {
let f = fs::OpenOptions::new().read(true).open(path)?;
let r = BufReader::new(f);
let mut top_map = HashMap::new();
for line in r.lines().filter_map(Result::ok) {
let mut split = line.split_whitespace();
let top_key = split.next().unwrap();
let mut map = HashMap::new();
for tok in split {
if let Ok((key, val)) = scan_fmt!(tok, "{}={}", String, String) {
map.insert(key, val);
}
}
top_map.insert(top_key.into(), map);
}
Ok(top_map)
}
struct GlobalProgState {
exiting: bool,
kick_seq: u64,
}
lazy_static::lazy_static! {
static ref PROG_STATE: Mutex<GlobalProgState> = Mutex::new(GlobalProgState {
exiting: false,
kick_seq: 1
});
static ref PROG_WAITQ: Condvar = Condvar::new();
}
thread_local! {
static LOCAL_KICK_SEQ: RefCell<u64> = RefCell::new(0);
}
pub fn setup_prog_state() {
ctrlc::set_handler(move || {
info!("SIGINT/TERM received, exiting...");
set_prog_exiting();
})
.expect("Error setting term handler");
}
pub fn set_prog_exiting() {
PROG_STATE.lock().unwrap().exiting = true;
PROG_WAITQ.notify_all();
}
pub fn prog_exiting() -> bool {
PROG_STATE.lock().unwrap().exiting
}
pub fn prog_kick() {
PROG_STATE.lock().unwrap().kick_seq += 1;
PROG_WAITQ.notify_all();
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum | {
Running,
Exiting,
Kicked,
}
pub fn wait_prog_state(dur: Duration) -> ProgState {
let mut first = true;
let mut state = PROG_STATE.lock().unwrap();
loop {
if state.exiting {
return ProgState::Exiting;
}
if LOCAL_KICK_SEQ.with(|seq| {
if *seq.borrow() < state.kick_seq {
*seq.borrow_mut() = state.kick_seq;
true
} else {
false
}
}) {
return ProgState::Kicked;
}
if first {
state = PROG_WAITQ.wait_timeout(state, dur).unwrap().0;
first = false;
} else {
return ProgState::Running;
}
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_format_duration() {
for pair in &[
(0.000003932, "3.9u"),
(0.00448, "4.5m"),
(0.3, "300m"),
(2042.0, "34.0M"),
(3456000.0, "40.0D"),
(60480000.0, "1.9Y"),
] {
let result = super::format_duration(pair.0);
assert_eq!(&result, pair.1);
println!("{} -> {} ({})", pair.0, &result, pair.1);
}
}
#[test]
fn test_parse_duration() {
for pair in &[
(0.0000039, "3.9u"),
(0.0044, "4.4m"),
(0.3, "300m"),
(2040.0, "34.0M"),
(3456000.0, "40.0D"),
(59918400.0, "1.9Y"),
(59918401.1, "1.9Y1s100m"),
(59918401.1, "1.9Y1.1s"),
(59918401.102, "1.9Y 1.1s 2000 u"),
(1.27, "1.27"),
(1.37, "100m1.27"),
] {
let result = super::parse_duration(pair.1).unwrap();
assert_eq!(pair.0, result);
println!("{} -> {} ({})", pair.1, result, pair.0);
}
}
}
| ProgState | identifier_name |
trie.rs | use crate::config::*;
use crate::louds_dense::LoudsDense;
use crate::louds_sparse::LoudsSparse;
use crate::builder;
pub struct Trie {
louds_dense: LoudsDense,
louds_sparse: LoudsSparse,
suffixes: Vec<Suffix>,
}
// Raw pointers would be faster here
// With a vector of vectors the data won't even stay in cache
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Suffix {
contents: Vec<u8>,
}
impl Trie {
pub fn new(keys: &V | <u8>>) -> Self {
let include_dense = K_INCLUDE_DENSE;
let sparse_dense = K_SPARSE_DENSE_RATIO;
let mut builder = builder::Builder::new(include_dense, sparse_dense);
builder.build(&keys);
let louds_dense = LoudsDense::new(&builder);
let louds_sparse = LoudsSparse::new(&builder);
let mut num_keys = 0;
for level in 0..louds_sparse.get_height() {
num_keys += builder.get_suffix_counts()[level];
}
let mut suffix_builder: Vec<Suffix> = vec![
Suffix {
contents: Vec::new(),
};
num_keys
];
for i in 0..keys.len() {
if i != 0 && keys[i] == keys[i - 1] {
continue;
}
let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice());
assert!(key_id < num_keys);
let contents = keys[i][level..].to_vec();
suffix_builder[key_id] = Suffix { contents };
}
// suffix_builder.sort();
// let mut suffix_ptrs: Vec<usize> = vec![0; num_keys];
// let mut suffixes = vec![];
// let mut prev_suffix = Suffix {
// contents: Vec::new(),
// key_id: kNotFound,
// };
// for i in 0..num_keys {
// let curr_suffix = suffix_builder[num_keys - i - 1];
// if curr_suffix.contents.len() == 0 {
// suffix_ptrs[curr_suffix.key_id] = 0;
// continue;
// }
// let mut num_match = 0;
// while num_match < curr_suffix.contents.len()
// && num_match < prev_suffix.contents.len()
// && prev_suffix.contents[num_match] == curr_suffix.contents[num_match]
// {
// num_match += 1;
// }
// if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 {
// suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match)
// } else {
// suffix_ptrs[curr_suffix.key_id] = suffixes.len();
// suffixes.push(curr_suffix);
// }
// prev_suffix = curr_suffix;
// }
// let mut suf_bits = 0;
// let mut max_ptr = suffixes.len();
// suf_bits += 1;
// max_ptr >>= 1;
// while max_ptr != 0 {
// suf_bits += 1;
// max_ptr >>= 1;
// }
// let suffix_ptrs =
return Trie {
louds_dense,
louds_sparse,
suffixes: suffix_builder,
}
}
fn traverse(
louds_dense: &LoudsDense,
louds_sparse: &LoudsSparse,
key: &key_t,
) -> (position_t, level_t) {
let ret = louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
fn _traverse(
&self,
key: &key_t,
) -> (position_t, level_t) {
let ret = self.louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return self.louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
pub fn exact_search(&self, key: &key_t) -> position_t {
let (key_id, level) = self._traverse(key);
if key_id == K_NOT_FOUND {
return K_NOT_FOUND
}
let suffix = &self.suffixes[key_id].contents;
let length = key.len() - level;
if length != suffix.len() {
return K_NOT_FOUND
}
for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) {
if cur_key != cur_suf {
return K_NOT_FOUND
}
}
return key_id
}
    // // Want to return whether it was found, plus a log of the previous search.
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t {
// let diff_level = self.find_different_level(previous_key, key);
// let (key_id, level) =
// if diff_level < self.louds_sparse.get_start_level() {
// let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level);
// if ret.0 != K_NOT_FOUND {
// (ret.0, ret.1)
// } else if ret.2 != K_NOT_FOUND {
// self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level)
// } else {
// (ret.0, ret.1)
// }
// } else {
// self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level)
// };
// }
// fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t {
// let mut diff_level = 0;
// for (p, k) in pre_key.iter().zip(key) {
// if p != k {
// return diff_level
// } else {
// diff_level += 1;
// }
// }
// return diff_level
// }
    // time_range depends on the encoding specification
pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool {
let mut sequnce_count = 0;
let th = TrajectoryHash::new(7, 20, 16);
for key in keys.iter() {
// let result = self.exact_search(&key);
// let is_find = result != K_NOT_FOUND;
let is_find = self.accurate_search(key, &th);
if is_find {
sequnce_count += 1;
if sequnce_count >= time_range {
return true
}
} else {
sequnce_count = 0;
}
}
return false
}
pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool {
let neighbors = self.get_neighbors(key, th);
for nei in neighbors {
if self.exact_search(nei.as_slice()) != K_NOT_FOUND {
return true
}
}
false
}
pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> {
let mut vec = Vec::with_capacity(EXTEND_NUMBER);
let value: u128 = read_be_u128(key);
// tiles to hash values
for position in ACCURATE_GRID {
let bytes = u128_to_bytes(th.calc(value, position), th.byte_length);
vec.push(bytes);
}
vec
}
}
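// Illustrative only: build a Trie over encoded trajectory keys and ask whether
// the query contains a run of at least three consecutive matching points. The
// keys are assumed to be pre-sorted, encoded byte strings; nothing here is part
// of the original API surface beyond Trie::new and doe_search.
fn _example_doe(keys: Vec<Vec<u8>>, query: Vec<Vec<u8>>) -> bool {
    let trie = Trie::new(&keys);
    trie.doe_search(3, &query)
}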
pub struct TrajectoryHash {
byte_length: usize,
    pub mask_lists: [Vec<u128>; 3], // ascending order
}
impl TrajectoryHash {
pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self {
let mut geo_lng_mask = 0b100u128;
let mut geo_lat_mask = 0b010u128;
let mut time_mask = 0b001u128;
let diff = (geo_length as i32) - (time_length as i32);
let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()];
if diff >= 0 {
for _ in 0..time_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
geo_lng_mask >>= 3;
geo_lng_mask <<= 2;
geo_lat_mask >>= 3;
geo_lat_mask <<= 2;
for _ in 0..diff {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 2;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 2;
}
} else {
for _ in 0..geo_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
for _ in 0..(-diff) {
mask_lists[2].push(time_mask);
time_mask <<= 1;
}
}
TrajectoryHash { byte_length, mask_lists }
}
pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 {
let mut updated = value;
for (dimension, direction) in pos.iter().enumerate() {
match direction {
-1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask != 0 {
updated &= !mask;
break;
} else {
updated |= mask;
}
}
},
0 => {},
1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask == 0 {
updated |= mask;
break;
} else {
updated &= !mask;
}
}
},
_ => panic!("invalid value of direction!")
}
}
updated
}
}
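// Illustrative only: derive one neighboring hash from a 7-byte key using the
// same 7/20/16 parameters doe_search passes to TrajectoryHash::new. The
// direction vector [1, 0, 0] bumps the longitude component by one step.
fn _example_neighbor(key: &key_t) -> Vec<u8> {
    let th = TrajectoryHash::new(7, 20, 16);
    let value = read_be_u128(key);
    let moved = th.calc(value, [1, 0, 0]);
    u128_to_bytes(moved, th.byte_length)
}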
fn read_be_u128(input: &[u8]) -> u128 {
let mut output = 0u128;
let digit = input.len() - 1;
for (i, byte) in input.iter().enumerate() {
output |= (*byte as u128) << 8*(digit - i);
}
output
}
fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
value.to_be_bytes()[16-byte_length..].to_vec()
} | ec<Vec | identifier_name |
trie.rs | use crate::config::*;
use crate::louds_dense::LoudsDense;
use crate::louds_sparse::LoudsSparse;
use crate::builder;
pub struct Trie {
louds_dense: LoudsDense,
louds_sparse: LoudsSparse,
suffixes: Vec<Suffix>,
}
// Raw pointers would be faster here
// With a vector of vectors the data won't even stay in cache
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Suffix {
contents: Vec<u8>,
}
impl Trie {
pub fn new(keys: &Vec<Vec<u8>>) -> Self {
let include_dense = K_INCLUDE_DENSE;
let sparse_dense = K_SPARSE_DENSE_RATIO;
let mut builder = builder::Builder::new(include_dense, sparse_dense);
builder.build(&keys);
let louds_dense = LoudsDense::new(&builder);
let louds_sparse = LoudsSparse::new(&builder);
let mut num_keys = 0;
for level in 0..louds_sparse.get_height() {
num_keys += builder.get_suffix_counts()[level];
}
let mut suffix_builder: Vec<Suffix> = vec![
Suffix {
contents: Vec::new(),
};
num_keys
];
for i in 0..keys.len() {
if i != 0 && keys[i] == keys[i - 1] {
continue;
}
let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice());
assert!(key_id < num_keys);
let contents = keys[i][level..].to_vec();
suffix_builder[key_id] = Suffix { contents };
}
// suffix_builder.sort();
// let mut suffix_ptrs: Vec<usize> = vec![0; num_keys];
// let mut suffixes = vec![];
// let mut prev_suffix = Suffix {
// contents: Vec::new(),
// key_id: kNotFound,
// };
// for i in 0..num_keys {
// let curr_suffix = suffix_builder[num_keys - i - 1];
// if curr_suffix.contents.len() == 0 {
// suffix_ptrs[curr_suffix.key_id] = 0;
// continue;
// }
// let mut num_match = 0;
// while num_match < curr_suffix.contents.len()
// && num_match < prev_suffix.contents.len()
// && prev_suffix.contents[num_match] == curr_suffix.contents[num_match]
// {
// num_match += 1;
// }
// if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 {
// suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match)
// } else {
// suffix_ptrs[curr_suffix.key_id] = suffixes.len();
// suffixes.push(curr_suffix);
// }
// prev_suffix = curr_suffix;
// }
// let mut suf_bits = 0;
// let mut max_ptr = suffixes.len();
// suf_bits += 1;
// max_ptr >>= 1;
// while max_ptr != 0 {
// suf_bits += 1;
// max_ptr >>= 1;
// }
// let suffix_ptrs =
return Trie {
louds_dense,
louds_sparse,
suffixes: suffix_builder,
}
}
fn traverse(
louds_dense: &LoudsDense,
louds_sparse: &LoudsSparse,
key: &key_t,
) -> (position_t, level_t) {
let ret = louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_N | d_key(key, ret.2);
}
return (ret.0, ret.1);
}
fn _traverse(
&self,
key: &key_t,
) -> (position_t, level_t) {
let ret = self.louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return self.louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
pub fn exact_search(&self, key: &key_t) -> position_t {
let (key_id, level) = self._traverse(key);
if key_id == K_NOT_FOUND {
return K_NOT_FOUND
}
let suffix = &self.suffixes[key_id].contents;
let length = key.len() - level;
if length != suffix.len() {
return K_NOT_FOUND
}
for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) {
if cur_key != cur_suf {
return K_NOT_FOUND
}
}
return key_id
}
    // // Want to return whether it was found, plus a log of the previous search.
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t {
// let diff_level = self.find_different_level(previous_key, key);
// let (key_id, level) =
// if diff_level < self.louds_sparse.get_start_level() {
// let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level);
// if ret.0 != K_NOT_FOUND {
// (ret.0, ret.1)
// } else if ret.2 != K_NOT_FOUND {
// self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level)
// } else {
// (ret.0, ret.1)
// }
// } else {
// self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level)
// };
// }
// fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t {
// let mut diff_level = 0;
// for (p, k) in pre_key.iter().zip(key) {
// if p != k {
// return diff_level
// } else {
// diff_level += 1;
// }
// }
// return diff_level
// }
    // time_range depends on the encoding specification
pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool {
let mut sequnce_count = 0;
let th = TrajectoryHash::new(7, 20, 16);
for key in keys.iter() {
// let result = self.exact_search(&key);
// let is_find = result != K_NOT_FOUND;
let is_find = self.accurate_search(key, &th);
if is_find {
sequnce_count += 1;
if sequnce_count >= time_range {
return true
}
} else {
sequnce_count = 0;
}
}
return false
}
pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool {
let neighbors = self.get_neighbors(key, th);
for nei in neighbors {
if self.exact_search(nei.as_slice()) != K_NOT_FOUND {
return true
}
}
false
}
pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> {
let mut vec = Vec::with_capacity(EXTEND_NUMBER);
let value: u128 = read_be_u128(key);
// tiles to hash values
for position in ACCURATE_GRID {
let bytes = u128_to_bytes(th.calc(value, position), th.byte_length);
vec.push(bytes);
}
vec
}
}
pub struct TrajectoryHash {
byte_length: usize,
    pub mask_lists: [Vec<u128>; 3], // ascending order
}
impl TrajectoryHash {
pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self {
let mut geo_lng_mask = 0b100u128;
let mut geo_lat_mask = 0b010u128;
let mut time_mask = 0b001u128;
let diff = (geo_length as i32) - (time_length as i32);
let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()];
if diff >= 0 {
for _ in 0..time_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
geo_lng_mask >>= 3;
geo_lng_mask <<= 2;
geo_lat_mask >>= 3;
geo_lat_mask <<= 2;
for _ in 0..diff {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 2;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 2;
}
} else {
for _ in 0..geo_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
for _ in 0..(-diff) {
mask_lists[2].push(time_mask);
time_mask <<= 1;
}
}
TrajectoryHash { byte_length, mask_lists }
}
pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 {
let mut updated = value;
for (dimension, direction) in pos.iter().enumerate() {
match direction {
-1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask != 0 {
updated &= !mask;
break;
} else {
updated |= mask;
}
}
},
0 => {},
1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask == 0 {
updated |= mask;
break;
} else {
updated &= !mask;
}
}
},
_ => panic!("invalid value of direction!")
}
}
updated
}
}
fn read_be_u128(input: &[u8]) -> u128 {
let mut output = 0u128;
let digit = input.len() - 1;
for (i, byte) in input.iter().enumerate() {
output |= (*byte as u128) << 8*(digit - i);
}
output
}
fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
value.to_be_bytes()[16-byte_length..].to_vec()
} | OT_FOUND {
return louds_sparse.fin | conditional_block |
trie.rs | use crate::config::*;
use crate::louds_dense::LoudsDense;
use crate::louds_sparse::LoudsSparse;
use crate::builder;
pub struct Trie {
louds_dense: LoudsDense,
louds_sparse: LoudsSparse,
suffixes: Vec<Suffix>,
}
// Raw pointers would be faster here
// With a vector of vectors the data won't even stay in cache
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Suffix {
contents: Vec<u8>,
}
impl Trie {
pub fn new(keys: &Vec<Vec<u8>>) -> Self {
let include_dense = K_INCLUDE_DENSE;
let sparse_dense = K_SPARSE_DENSE_RATIO;
let mut builder = builder::Builder::new(include_dense, sparse_dense);
builder.build(&keys);
let louds_dense = LoudsDense::new(&builder);
let louds_sparse = LoudsSparse::new(&builder);
let mut num_keys = 0;
for level in 0..louds_sparse.get_height() {
num_keys += builder.get_suffix_counts()[level];
}
let mut suffix_builder: Vec<Suffix> = vec![
Suffix {
contents: Vec::new(),
};
num_keys
];
for i in 0..keys.len() {
if i != 0 && keys[i] == keys[i - 1] {
continue;
}
let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice());
assert!(key_id < num_keys);
let contents = keys[i][level..].to_vec();
suffix_builder[key_id] = Suffix { contents };
}
// suffix_builder.sort();
// let mut suffix_ptrs: Vec<usize> = vec![0; num_keys];
// let mut suffixes = vec![];
// let mut prev_suffix = Suffix {
// contents: Vec::new(),
// key_id: kNotFound,
// };
// for i in 0..num_keys {
// let curr_suffix = suffix_builder[num_keys - i - 1];
// if curr_suffix.contents.len() == 0 {
// suffix_ptrs[curr_suffix.key_id] = 0;
// continue;
// }
// let mut num_match = 0;
// while num_match < curr_suffix.contents.len()
// && num_match < prev_suffix.contents.len()
// && prev_suffix.contents[num_match] == curr_suffix.contents[num_match]
// {
// num_match += 1;
// }
// if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 {
// suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match)
// } else {
// suffix_ptrs[curr_suffix.key_id] = suffixes.len();
// suffixes.push(curr_suffix);
// }
// prev_suffix = curr_suffix;
// }
// let mut suf_bits = 0;
// let mut max_ptr = suffixes.len();
// suf_bits += 1;
// max_ptr >>= 1;
// while max_ptr != 0 {
// suf_bits += 1;
// max_ptr >>= 1;
// }
// let suffix_ptrs =
return Trie {
louds_dense,
louds_sparse,
suffixes: suffix_builder,
}
}
fn traverse(
louds_dense: &LoudsDense,
louds_sparse: &LoudsSparse,
key: &key_t,
) -> (position_t, level_t) {
let ret = louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
fn _traverse(
&self,
key: &key_t,
) -> (position_t, level_t) {
let ret = self.louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return self.louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
pub fn exact_search(&self, key: &key_t) -> position_t {
let (key_id, level) = self._traverse(key);
if key_id == K_NOT_FOUND {
return K_NOT_FOUND
}
let suffix = &self.suffixes[key_id].contents;
let length = key.len() - level;
if length != suffix.len() {
return K_NOT_FOUND
}
for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) {
if cur_key != cur_suf {
return K_NOT_FOUND
}
}
return key_id
}
    // // Want to return whether it was found, plus a log of the previous search.
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t {
// let diff_level = self.find_different_level(previous_key, key);
// let (key_id, level) =
// if diff_level < self.louds_sparse.get_start_level() {
// let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level);
// if ret.0 != K_NOT_FOUND {
// (ret.0, ret.1)
// } else if ret.2 != K_NOT_FOUND {
// self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level)
// } else {
// (ret.0, ret.1)
// }
// } else {
// self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level)
// };
// }
// fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t {
// let mut diff_level = 0;
// for (p, k) in pre_key.iter().zip(key) {
// if p != k {
// return diff_level
// } else {
// diff_level += 1;
// }
// }
// return diff_level
// }
    // time_range depends on the encoding specification
pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool {
let mut sequnce_count = 0;
let th = TrajectoryHash::new(7, 20, 16);
for key in keys.iter() {
// let result = self.exact_search(&key);
// let is_find = result != K_NOT_FOUND;
let is_find = self.accurate_search(key, &th);
if is_find {
sequnce_count += 1;
if sequnce_count >= time_range {
return true
}
} else {
sequnce_count = 0;
}
}
return false
}
pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool {
let neighbors = self.get_neighbors(key, th);
for nei in neighbors {
if self.exact_search(nei.as_slice()) != K_NOT_FOUND {
return true
}
}
false
}
pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> {
let mut vec = Vec::with_capacity(EXTEND_NUMBER);
let value: u128 = read_be_u128(key);
// tiles to hash values
for position in ACCURATE_GRID {
let bytes = u128_to_bytes(th.calc(value, position), th.byte_length);
vec.push(bytes);
}
vec
}
}
pub struct TrajectoryHash {
byte_length: usize,
    pub mask_lists: [Vec<u128>; 3], // ascending order
}
impl TrajectoryHash {
pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self {
let mut geo_lng_mask = 0b100u128;
let mut geo_lat_mask = 0b010u128;
let mut time_mask = 0b001u128;
let diff = (geo_length as i32) - (time_length as i32);
let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()];
if diff >= 0 {
for _ in 0..time_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
geo_lng_mask >>= 3;
geo_lng_mask <<= 2;
geo_lat_mask >>= 3;
geo_lat_mask <<= 2;
for _ in 0..diff {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 2;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 2;
}
} else {
for _ in 0..geo_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
for _ in 0..(-diff) {
mask_lists[2].push(time_mask);
time_mask <<= 1;
}
}
TrajectoryHash { byte_length, mask_lists }
}
pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 {
let mut updated = value;
for (dimension, direction) in pos.iter().enumerate() {
match direction {
-1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask != 0 {
updated &= !mask;
break;
} else {
updated |= mask;
}
}
},
0 => {},
1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask == 0 {
updated |= mask;
break;
} else {
updated &= !mask;
}
}
},
_ => panic!("invalid value of direction!")
}
}
updated
}
}
fn read_be_u128(input: &[u8]) -> u128 {
let mut output = 0u128;
let digit = input.len() - 1;
for (i, byte) in input.iter().enumerate() {
output |= (*byte as u128) << 8*(digit - i);
}
output
}
fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
value.to_be_bytes()[16-byte_length..].to_vec()
} | identifier_body |
||
trie.rs | use crate::config::*;
use crate::louds_dense::LoudsDense;
use crate::louds_sparse::LoudsSparse;
use crate::builder;
pub struct Trie {
louds_dense: LoudsDense,
louds_sparse: LoudsSparse,
suffixes: Vec<Suffix>,
}
// Raw pointers would be faster here
// With a vector of vectors the data won't even stay in cache
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Suffix {
contents: Vec<u8>,
}
impl Trie {
pub fn new(keys: &Vec<Vec<u8>>) -> Self {
let include_dense = K_INCLUDE_DENSE;
let sparse_dense = K_SPARSE_DENSE_RATIO;
let mut builder = builder::Builder::new(include_dense, sparse_dense);
builder.build(&keys);
let louds_dense = LoudsDense::new(&builder);
let louds_sparse = LoudsSparse::new(&builder);
let mut num_keys = 0;
for level in 0..louds_sparse.get_height() {
num_keys += builder.get_suffix_counts()[level];
}
let mut suffix_builder: Vec<Suffix> = vec![
Suffix {
contents: Vec::new(),
};
num_keys
];
for i in 0..keys.len() {
if i != 0 && keys[i] == keys[i - 1] {
continue;
}
let (key_id, level) = Trie::traverse(&louds_dense, &louds_sparse, keys[i].as_slice());
assert!(key_id < num_keys);
let contents = keys[i][level..].to_vec();
suffix_builder[key_id] = Suffix { contents };
}
// suffix_builder.sort();
// let mut suffix_ptrs: Vec<usize> = vec![0; num_keys];
// let mut suffixes = vec![];
// let mut prev_suffix = Suffix {
// contents: Vec::new(),
// key_id: kNotFound,
// };
// for i in 0..num_keys {
// let curr_suffix = suffix_builder[num_keys - i - 1];
// if curr_suffix.contents.len() == 0 {
// suffix_ptrs[curr_suffix.key_id] = 0;
// continue;
// }
// let mut num_match = 0;
// while num_match < curr_suffix.contents.len()
// && num_match < prev_suffix.contents.len()
// && prev_suffix.contents[num_match] == curr_suffix.contents[num_match]
// {
// num_match += 1;
// }
// if num_match == curr_suffix.contents.len() && prev_suffix.contents.len() != 0 {
// suffix_ptrs[curr_suffix.key_id] = suffix_ptrs[prev_suffix.key_id] + (prev_suffix.contents.len() - num_match)
// } else {
// suffix_ptrs[curr_suffix.key_id] = suffixes.len();
// suffixes.push(curr_suffix);
// }
// prev_suffix = curr_suffix;
// }
// let mut suf_bits = 0;
// let mut max_ptr = suffixes.len();
// suf_bits += 1;
// max_ptr >>= 1;
// while max_ptr != 0 {
// suf_bits += 1;
// max_ptr >>= 1;
// }
// let suffix_ptrs =
return Trie {
louds_dense,
louds_sparse,
suffixes: suffix_builder,
}
}
fn traverse(
louds_dense: &LoudsDense,
louds_sparse: &LoudsSparse,
key: &key_t,
) -> (position_t, level_t) {
let ret = louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
fn _traverse(
&self,
key: &key_t,
) -> (position_t, level_t) {
let ret = self.louds_dense.find_key(key);
if ret.0 != K_NOT_FOUND {
return (ret.0, ret.1);
}
if ret.2 != K_NOT_FOUND {
return self.louds_sparse.find_key(key, ret.2);
}
return (ret.0, ret.1);
}
pub fn exact_search(&self, key: &key_t) -> position_t {
let (key_id, level) = self._traverse(key);
if key_id == K_NOT_FOUND {
return K_NOT_FOUND
}
let suffix = &self.suffixes[key_id].contents;
let length = key.len() - level;
if length != suffix.len() {
return K_NOT_FOUND
}
for (cur_key, cur_suf) in key[level..].iter().zip(suffix.iter()) {
if cur_key != cur_suf {
return K_NOT_FOUND
}
}
return key_id
}
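// Note (added): exact_search resolves a key in two phases — the LOUDS-encoded
// trie lookup (dense, then sparse) returns a candidate key_id plus the depth
// reached, and the stored per-key suffix is then compared byte-by-byte against
// the remainder of the query key.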
// // Want to return whether the key was found, together with a log of the previous search.
// fn caching_search(&self, previous_key: &key_t, key: &key_t, cache: Cache) -> position_t {
// let diff_level = self.find_different_level(previous_key, key);
// let (key_id, level) =
// if diff_level < self.louds_sparse.get_start_level() {
// let ret = self.louds_dense.find_key_with_cache(key, cache, diff_level);
// if ret.0 != K_NOT_FOUND {
// (ret.0, ret.1)
// } else if ret.2 != K_NOT_FOUND {
// self.louds_sparse.find_key_with_cache(key, ret.2, cache, diff_level)
// } else {
// (ret.0, ret.1)
// }
// } else {
// self.louds_sparse.find_key_with_cache(key, 0, cache, diff_level)
// };
// }
// fn find_different_level(&self, pre_key: &key_t, key: &key_t) -> level_t {
// let mut diff_level = 0;
// for (p, k) in pre_key.iter().zip(key) {
// if p != k {
// return diff_level
// } else {
// diff_level += 1;
// }
// }
// return diff_level
// }
// time_range depends on the encoding specification
pub fn doe_search(&self, time_range: usize, keys: &Vec<Vec<u8>>) -> bool {
let mut sequence_count = 0;
let th = TrajectoryHash::new(7, 20, 16);
for key in keys.iter() {
// let result = self.exact_search(&key);
// let is_find = result != K_NOT_FOUND;
let is_find = self.accurate_search(key, &th);
if is_find {
sequence_count += 1;
if sequence_count >= time_range {
return true
}
} else {
sequence_count = 0;
}
}
return false
}
pub fn accurate_search(&self, key: &key_t, th: &TrajectoryHash) -> bool {
let neighbors = self.get_neighbors(key, th);
for nei in neighbors {
if self.exact_search(nei.as_slice()) != K_NOT_FOUND {
return true
}
}
false
}
pub fn get_neighbors(&self, key: &key_t, th: &TrajectoryHash) -> Vec<Vec<u8>> {
let mut vec = Vec::with_capacity(EXTEND_NUMBER);
let value: u128 = read_be_u128(key);
// tiles to hash values
for position in ACCURATE_GRID {
let bytes = u128_to_bytes(th.calc(value, position), th.byte_length);
vec.push(bytes);
}
vec
}
}
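// Minimal usage sketch (illustrative, not from the original source). Keys are
// expected to be fixed-width big-endian trajectory hashes; doe_search asks
// whether `time_range` consecutive hashes are all present, allowing one grid
// step of tolerance via get_neighbors.
#[cfg(test)]
mod trie_usage_sketch {
    use super::*;

    #[test]
    #[ignore] // sketch only: assumes the builder accepts these toy 7-byte keys
    fn build_and_query() {
        let mut keys: Vec<Vec<u8>> = vec![vec![0, 0, 0, 0, 0, 0, 1], vec![0, 0, 0, 0, 0, 0, 2]];
        keys.sort();
        let trie = Trie::new(&keys);
        assert!(trie.doe_search(1, &keys));
    }
}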
pub struct TrajectoryHash {
byte_length: usize,
pub mask_lists: [Vec<u128>; 3], // ascend order
}
impl TrajectoryHash {
pub fn new(byte_length: usize, geo_length: usize, time_length: usize) -> Self {
let mut geo_lng_mask = 0b100u128;
let mut geo_lat_mask = 0b010u128;
let mut time_mask = 0b001u128;
let diff = (geo_length as i32) - (time_length as i32);
let mut mask_lists = [Vec::new(), Vec::new(), Vec::new()];
if diff >= 0 {
for _ in 0..time_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
geo_lng_mask >>= 3;
geo_lng_mask <<= 2;
geo_lat_mask >>= 3;
geo_lat_mask <<= 2;
for _ in 0..diff {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 2;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 2;
}
} else {
for _ in 0..geo_length {
mask_lists[0].push(geo_lng_mask);
geo_lng_mask <<= 3;
mask_lists[1].push(geo_lat_mask);
geo_lat_mask <<= 3;
mask_lists[2].push(time_mask);
time_mask <<= 3;
}
for _ in 0..(-diff) {
mask_lists[2].push(time_mask);
time_mask <<= 1;
}
}
TrajectoryHash { byte_length, mask_lists }
}
pub fn calc(&self, value: u128, pos: [i32;3]) -> u128 {
let mut updated = value;
for (dimension, direction) in pos.iter().enumerate() {
match direction {
-1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask != 0 {
updated &= !mask; | }
},
0 => {},
1 => {
for mask in self.mask_lists[dimension].iter() {
if value & mask == 0 {
updated |= mask;
break;
} else {
updated &= !mask;
}
}
},
_ => panic!("invalid value of direction!")
}
}
updated
}
}
fn read_be_u128(input: &[u8]) -> u128 {
let mut output = 0u128;
let digit = input.len() - 1;
for (i, byte) in input.iter().enumerate() {
output |= (*byte as u128) << 8*(digit - i);
}
output
}
fn u128_to_bytes(value: u128, byte_length: usize) -> Vec<u8> {
value.to_be_bytes()[16-byte_length..].to_vec()
} | break;
} else {
updated |= mask;
} | random_line_split |
tau0305.py | from numba import jit, njit,prange
from numba import cuda, int32, complex128, float64, int64
import numpy as np
import threading
import math
import random
import torch
import weibull
import itertools
from scipy.spatial import distance as compute_distance
from scipy.spatial.distance import squareform
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KernelDensity
#####################################################################################
#Customize CUDA Kernels
@cuda.jit(device = True)
def cosine_gpu(u, v):
dot_product = 0
norm_v = 0
norm_u = 0
for m, n in zip(u, v):
dot_product += m * n
for m, n in zip(u, u):
norm_u += m * n
for m, n in zip(v, v):
norm_v += m * n
return 1.0 - dot_product / (math.sqrt(norm_u) * math.sqrt(norm_v))
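# Note (added): this returns the cosine *distance*, i.e. 1 - cosine similarity;
# it assumes neither vector is all zeros, otherwise the division is undefined.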
@cuda.jit(device = True)
def euclidean_gpu(u, v):
norm = 0
for m, n in zip(u, v):
norm += (m - n) * (m - n)
norm = math.sqrt(norm)
return norm
@cuda.jit
def cosine_dis_gpu(X, Y, out):
i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = cosine_gpu(u, v)
@cuda.jit
def euclidean_dis_gpu(X, Y, out):
i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = euclidean_gpu(u, v)
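# Illustrative sketch (not part of the original pipeline): how these kernels are
# launched for a small pair of matrices. The 16x16 block size and the
# ceil-divided grid mirror what tau() below does on full chunks.
def _demo_pairwise_distance(X, Y, metric="cosine"):
    X_d, Y_d = cuda.to_device(X), cuda.to_device(Y)
    out_d = cuda.device_array((X.shape[0], Y.shape[0]))
    threadsperblock = (16, 16)
    blockspergrid = (math.ceil(X.shape[0] / 16), math.ceil(Y.shape[0] / 16))
    if metric == "cosine":
        cosine_dis_gpu[blockspergrid, threadsperblock](X_d, Y_d, out_d)
    else:
        euclidean_dis_gpu[blockspergrid, threadsperblock](X_d, Y_d, out_d)
    return out_d.copy_to_host()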
#####################################################################################
def tau(args, features, gpus):
#Currently only cosine and Euclidean distances are supported on GPU
if args.distance_metric:
metrics = [args.distance_metric]
else:
metrics =['cosine','euclidean']
print("The Distance Metric is: ", metrics)
#CUDA parallel distance computation, with multi-GPU support
def gpu_pairwise_distance(chunks, step_i, gpu):
#lock.acquire()#no need to lock threads in this case
cuda.select_device(gpu)
for i, chunk1 in enumerate(chunks):
print("Computing distance chunk: ", i + 1)
#Define chunk id x axis
step_j = 0
n_i = chunk1.shape[0]
for j, chunk2 in enumerate(chunks):
#Copy data to gpu
X_global = cuda.to_device(chunk1)
Y_global = cuda.to_device(chunk2)
#Define chunk id y axis
n_j = chunk2.shape[0]
out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0]))
# Define gpu's grid
threadsperblock = (16, 16)
blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0]))
blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1]))
blockspergrid = (blockspergrid_x, blockspergrid_y)
#Compute distance on gpu
if metric == "cosine":
cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
elif metric == "euclidean":
euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
#Find mean and max for each loop
mean_distances.append(np.mean(out_global.copy_to_host()))
max_dis.append(np.max(out_global.copy_to_host()))
#Randomly select 2% of the points for EVT fitting
k = int(len(out_global.copy_to_host()) * 0.02)
number_of_rows = out_global.copy_to_host().shape[0]
random_indices = np.random.choice(number_of_rows, size=k, replace=False)
#Copy gpu distance data to cpu numpy
if len(out_global.copy_to_host()[random_indices, :]) > 0:
whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten())
#update chunk id
step_j += n_j
step_i += n_i
del X_global, Y_global, out_global
for metric in metrics:
distances = []
mean_distances = []
max_dis = []
#Split cpu's data to gpus
n = int(len(features) / len(gpus))
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
threads = []
from split import split_double
#Compute chunks in multi-gpus
for p, gpu in enumerate(gpus):
whole_distances = []
split = split_double(args, mutilple_features[p])
n = int(len(mutilple_features[p]) / split)
chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )]
step_i = 0
threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),]))
#Number of threads depends on how many GPUs you have
for t in threads:
t.setDaemon(True)
t.start()
#Re-group final distance data from gpus
for t in threads:
whole_distances = []
t.join()
distances.extend(np.array(whole_distances).flatten())
#Process data
random_distances = np.array(distances).flatten()
random_distances = random_distances.reshape((random_distances.shape[0], 1)).T
mean_distances = np.mean(mean_distances)
print("mean_distances: ",mean_distances)
print("max dis:", max(max_dis))#original max dis before EVT
###################################################################
########################################################################################
print("Finding Nearest Points......")
#Find nearest points on GPUs
from gpu_functions import gpu_nearest
nearest_cluster = np.zeros((len(features)), dtype = 'int')
nearest_points_dis = np.zeros((len(features)))
n = int(len(features) / len(gpus))
features = list(features)
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_features) > len(gpus):
mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)])
del mutilple_features[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_features[0:len(gpus)]):
ind.append(range(step, len(j)+step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis]))
thread(threads)
del mutilple_features
# In round 1 the centroids are just the points themselves, regardless of linkage
nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))]
nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first
nearest_points = nearest_cluster
########################################################################################
print("Computing the appearance of nearest_points")
threadsperblock = 32
blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock)
X_global = cuda.to_device(nearest_points)
out_global = cuda.device_array((nearest_points.shape[0]))
from cuda_kernels import count_appear
count_appear[blockspergrid, threadsperblock](X_global, out_global)
appear = np.array(out_global.copy_to_host(), dtype = int)
appear_count = [[j, i] for i, j in enumerate(appear)]
# count the appearance of each kernel point
# generate order
order = [i[1] for i in sorted(appear_count, reverse=True)]
# add non kernel points to order
processed = set()
init = []
for count, i in enumerate(order):
j = nearest_points[i]
if i not in processed and j not in processed:
init.append([i, j])
processed.add(i)
processed.add(j)
init = init[0: int(len(init))]
N = len(init)
init_length = N
init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups.
######################################################################################################
print("Finding Nearest Intial Pairs")
#Computing nearest centroids on GPUs
centroids = [np.mean(i,axis=0) for i in init_features]
X = centroids
from gpu_functions import gpu_nearest_init_centroids
gs = np.zeros((len(init_features)))
nearest_init = np.zeros((len(init_features)), dtype = 'int')
n = int(len(centroids) / len(gpus))
mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_centroids) > len(gpus):
mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)])
del mutilple_centroids[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_centroids[0:len(gpus)]):
ind.append(range(step, len(j) + step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init]))
thread(threads)
del mutilple_centroids
##########################################################################################################
#Nearest initial pairs combo
nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)]
##########################################################################################################
gxs = []
print("Computing Gaps")
# Computing gaps on GPUs
from gpu_functions import gpu_distance
for pair1, pair2 in nearest_init_combo:
round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair2[0], pair2[1]]])
features0 = [features[k] for k in pair1] #extract features of cluster0
features1 = [features[k] for k in pair2] #extract features of cluster1
centroid0 = np.mean(features0, axis=0) # Get centroid of initial pair0
centroid1 = np.mean(features1, axis=0) # Get centroid of initial pair1
if metric == "cosine":
gx = scipy.spatial.distance.cosine(centroid0, centroid1)
elif metric == "euclidean":
gx = scipy.spatial.distance.euclidean(centroid0, centroid1)
gxs.append(gx) #gaps
#Our tau
number_of_clusters = 30
thresh = 0.01
tw = weibull.weibull()
data = torch.Tensor(gxs)
fullrange = torch.linspace(0, 1, 100)
tailj = torch.linspace(.45, .55, 10)
torch.Tensor.ndim = property(lambda self: len(self.shape))
tw.FitHigh(data.view(1, -1), int(1. * len(data)))
parms = tw.return_all_parameters()
print(parms)
pcent = 1 - 1 / len(data)
pcent = 0.99
print("EVT Tau for ", pcent * 100, " Percentile at ",
parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor'])
# wscoresj = tw.wscore(tailj)
# print("Ijbb Wscores=",tailj,wscoresj)
wscoresj = tw.wscore(fullrange)
tau_T = parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor']
tau_T = tau_T.numpy()[0][0]
return 0, tau_T, tau_T, nearest_points, metric, init_length, nearest_cluster_with_distance_round_1, nearest_points_dis, gx, 0 # tau_T is the EVT-derived threshold
def nan_to_num(t,mynan=0.):
if torch.all(torch.isfinite(t)):
return t
if len(t.size()) == 0: | tw = weibull.weibull()
nbin=200
nscale = 10
#fullrange = torch.linspace(0,torch.max(ijbbdata),nbin)
fullrange = torch.linspace(0,maxval,nbin)
torch.Tensor.ndim = property(lambda self: len(self.shape))
#print( name , "Data mean, max", torch.mean(ijbbdata),torch.max(ijbbdata))
imean = torch.mean(data)
istd = torch.std(data)
imax = torch.max(data)
tw.FitHighTrimmed(data.view(1,-1),int(tailfrac*len(data)))
parms = tw.return_all_parameters()
wscoresj = tw.wscore(fullrange)
probj = nan_to_num(tw.prob(fullrange))
if(torch.sum(probj) > .001):
probj = probj/torch.sum(probj)
tau= parms['Scale']*np.power(-np.log((1-pcent)),(1/parms['Shape'])) - parms['translateAmountTensor'] + parms['smallScoreTensor']
return tau.numpy()
def thread(threads):
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
def takeSecond(elem):
return elem[1] | return torch.tensor(mynan)
return torch.cat([nan_to_num(l).unsqueeze(0) for l in t],0)
def get_tau(data,maxval,tailfrac=.25,pcent=.999):
#tw = weibull.weibull(translateAmountTensor=.001) | random_line_split |
tau0305.py | from numba import jit, njit,prange
from numba import cuda, int32, complex128, float64, int64
import numpy as np
import threading
import math
import random
import torch
import weibull
import itertools
from scipy.spatial import distance as compute_distance
from scipy.spatial.distance import squareform
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KernelDensity
#####################################################################################
#Customize CUDA Kernels
@cuda.jit(device = True)
def cosine_gpu(u, v):
dot_product = 0
norm_v = 0
norm_u = 0
for m, n in zip(u, v):
dot_product += m * n
for m, n in zip(u, u):
norm_u += m * n
for m, n in zip(v, v):
norm_v += m * n
return 1.0 - dot_product / (math.sqrt(norm_u) * math.sqrt(norm_v))
@cuda.jit(device = True)
def euclidean_gpu(u, v):
norm = 0
for m, n in zip(u, v):
norm += (m - n) * (m - n)
norm = math.sqrt(norm)
return norm
@cuda.jit
def cosine_dis_gpu(X, Y, out):
i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = cosine_gpu(u, v)
@cuda.jit
def | (X, Y, out):
i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = euclidean_gpu(u, v)
#####################################################################################
def tau(args, features, gpus):
#Now only support Cosine and Euclidean on GPU
if args.distance_metric:
metrics = [args.distance_metric]
else:
metrics =['cosine','euclidean']
print("The Distance Metric is: ", metrics)
#CUDA parallel distance computing, support multi-gpus
def gpu_pairwise_distance(chunks, step_i, gpu):
#lock.acquire()#no need to lock threads in this case
cuda.select_device(gpu)
for i, chunk1 in enumerate(chunks):
print("Computing distance chunk: ", i + 1)
#Define chunk id x axis
step_j = 0
n_i = chunk1.shape[0]
for j, chunk2 in enumerate(chunks):
#Copy data to gpu
X_global = cuda.to_device(chunk1)
Y_global = cuda.to_device(chunk2)
#Define chunk id y axis
n_j = chunk2.shape[0]
out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0]))
# Define gpu's grid
threadsperblock = (16, 16)
blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0]))
blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1]))
blockspergrid = (blockspergrid_x, blockspergrid_y)
#Compute distance on gpu
if metric == "cosine":
cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
elif metric == "euclidean":
euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
#Find mean and max for each loop
mean_distances.append(np.mean(out_global.copy_to_host()))
max_dis.append(np.max(out_global.copy_to_host()))
#Select 2% points to EVT randomly
k = int(len(out_global.copy_to_host()) * 0.02)
number_of_rows = out_global.copy_to_host().shape[0]
random_indices = np.random.choice(number_of_rows, size=k, replace=False)
#Copy gpu distance data to cpu numpy
if len(out_global.copy_to_host()[random_indices, :]) > 0:
whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten())
#update chunk id
step_j += n_j
step_i += n_i
del X_global, Y_global, out_global
for metric in metrics:
distances = []
mean_distances = []
max_dis = []
#Split cpu's data to gpus
n = int(len(features) / len(gpus))
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
threads = []
from split import split_double
#Compute chunks in multi-gpus
for p, gpu in enumerate(gpus):
whole_distances = []
split = split_double(args, mutilple_features[p])
n = int(len(mutilple_features[p]) / split)
chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )]
step_i = 0
threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),]))
#Number of threads depend on how many gpus you have
for t in threads:
t.setDaemon(True)
t.start()
#Re-group final distance data from gpus
for t in threads:
whole_distances = []
t.join()
distances.extend(np.array(whole_distances).flatten())
#Process data
random_distances = np.array(distances).flatten()
random_distances = random_distances.reshape((random_distances.shape[0], 1)).T
mean_distances = np.mean(mean_distances)
print("mean_distances: ",mean_distances)
print("max dis:", max(max_dis))#original max dis before EVT
###################################################################
########################################################################################
print("Finding Nearest Points......")
#Find nearest points on GPUs
from gpu_functions import gpu_nearest
nearest_cluster = np.zeros((len(features)), dtype = 'int')
nearest_points_dis = np.zeros((len(features)))
n = int(len(features) / len(gpus))
features = list(features)
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_features) > len(gpus):
mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)])
del mutilple_features[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_features[0:len(gpus)]):
ind.append(range(step, len(j)+step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis]))
thread(threads)
del mutilple_features
# In round 1 the centroids are just the points themselves, regardless of linkage
nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))]
nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first
nearest_points = nearest_cluster
########################################################################################
print("Computing the appearance of nearest_points")
threadsperblock = 32
blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock)
X_global = cuda.to_device(nearest_points)
out_global = cuda.device_array((nearest_points.shape[0]))
from cuda_kernels import count_appear
count_appear[blockspergrid, threadsperblock](X_global, out_global)
appear = np.array(out_global.copy_to_host(), dtype = int)
appear_count = [[j, i] for i, j in enumerate(appear)]
# count the appearance of each kernel points
# generate order
order = [i[1] for i in sorted(appear_count, reverse=True)]
# add non kernel points to order
processed = set()
init = []
for count, i in enumerate(order):
j = nearest_points[i]
if i not in processed and j not in processed:
init.append([i, j])
processed.add(i)
processed.add(j)
init = init[0: int(len(init))]
N = len(init)
init_length = N
init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups.
######################################################################################################
print("Finding Nearest Intial Pairs")
#Computing nearest centroids on GPUs
centroids = [np.mean(i,axis=0) for i in init_features]
X = centroids
from gpu_functions import gpu_nearest_init_centroids
gs = np.zeros((len(init_features)))
nearest_init = np.zeros((len(init_features)), dtype = 'int')
n = int(len(centroids) / len(gpus))
mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_centroids) > len(gpus):
mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)])
del mutilple_centroids[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_centroids[0:len(gpus)]):
ind.append(range(step, len(j) + step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init]))
thread(threads)
del mutilple_centroids
##########################################################################################################
#Nearest initial pairs combo
nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)]
##########################################################################################################
gxs = []
print("Computing Gaps")
# Computing gaps on GPUs
from gpu_functions import gpu_distance
for pair1, pair2 in nearest_init_combo:
round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair2[0], pair2[1]]])
features0 = [features[k] for k in pair1] #extract features of cluster0
features1 = [features[k] for k in pair2] #extract features of cluster1
centroid0 = np.mean(features0, axis=0) # Get centroid of initial pair0
centroid1 = np.mean(features1, axis=0) # Get centroid of initial pair1
if metric == "cosine":
gx = scipy.spatial.distance.cosine(centroid0, centroid1)
elif metric == "euclidean":
gx = scipy.spatial.distance.euclidean(centroid0, centroid1)
gxs.append(gx) #gaps
#Our tau
number_of_clusters = 30
thresh = 0.01
tw = weibull.weibull()
data = torch.Tensor(gxs)
fullrange = torch.linspace(0, 1, 100)
tailj = torch.linspace(.45, .55, 10)
torch.Tensor.ndim = property(lambda self: len(self.shape))
tw.FitHigh(data.view(1, -1), int(1. * len(data)))
parms = tw.return_all_parameters()
print(parms)
pcent = 1 - 1 / len(data)
pcent = 0.99
print("EVT Tau for ", pcent * 100, " Percentile at ",
parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor'])
# wscoresj = tw.wscore(tailj)
# print("Ijbb Wscores=",tailj,wscoresj)
wscoresj = tw.wscore(fullrange)
tau_T = parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor']
tau_T = tau_T.numpy()[0][0]
return 0, tau_T, tau_T, nearest_points, metric, init_length, nearest_cluster_with_distance_round_1, nearest_points_dis, gx, 0 # tau_T is the EVT-derived threshold
def nan_to_num(t,mynan=0.):
if torch.all(torch.isfinite(t)):
return t
if len(t.size()) == 0:
return torch.tensor(mynan)
return torch.cat([nan_to_num(l).unsqueeze(0) for l in t],0)
def get_tau(data,maxval,tailfrac=.25,pcent=.999):
#tw = weibull.weibull(translateAmountTensor=.001)
tw = weibull.weibull()
nbin=200
nscale = 10
#fullrange = torch.linspace(0,torch.max(ijbbdata),nbin)
fullrange = torch.linspace(0,maxval,nbin)
torch.Tensor.ndim = property(lambda self: len(self.shape))
#print( name , "Data mean, max", torch.mean(ijbbdata),torch.max(ijbbdata))
imean = torch.mean(data)
istd = torch.std(data)
imax = torch.max(data)
tw.FitHighTrimmed(data.view(1,-1),int(tailfrac*len(data)))
parms = tw.return_all_parameters()
wscoresj = tw.wscore(fullrange)
probj = nan_to_num(tw.prob(fullrange))
if(torch.sum(probj) > .001):
probj = probj/torch.sum(probj)
tau= parms['Scale']*np.power(-np.log((1-pcent)),(1/parms['Shape'])) - parms['translateAmountTensor'] + parms['smallScoreTensor']
return tau.numpy()
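# Worked formula (added for clarity, under the assumption of a standard
# two-parameter Weibull fit): for CDF F(x) = 1 - exp(-(x/scale)**shape), the
# p-th percentile is scale * (-ln(1 - p)) ** (1/shape). The translate/smallScore
# terms in get_tau simply undo the shifting applied by the fitting code.
def _weibull_percentile(scale, shape, p):
    return scale * (-math.log(1.0 - p)) ** (1.0 / shape)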
def thread(threads):
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
def takeSecond(elem):
return elem[1]
| euclidean_dis_gpu | identifier_name |
tau0305.py | from numba import jit, njit,prange
from numba import cuda, int32, complex128, float64, int64
import numpy as np
import threading
import math
import random
import torch
import weibull
import itertools
from scipy.spatial import distance as compute_distance
from scipy.spatial.distance import squareform
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KernelDensity
#####################################################################################
#Customize CUDA Kernels
@cuda.jit(device = True)
def cosine_gpu(u, v):
dot_product = 0
norm_v = 0
norm_u = 0
for m, n in zip(u, v):
dot_product += m * n
for m, n in zip(u, u):
norm_u += m * n
for m, n in zip(v, v):
norm_v += m * n
return 1.0 - dot_product / (math.sqrt(norm_u) * math.sqrt(norm_v))
@cuda.jit(device = True)
def euclidean_gpu(u, v):
norm = 0
for m, n in zip(u, v):
norm += (m - n) * (m - n)
norm = math.sqrt(norm)
return norm
@cuda.jit
def cosine_dis_gpu(X, Y, out):
|
@cuda.jit
def euclidean_dis_gpu(X, Y, out):
i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = euclidean_gpu(u, v)
#####################################################################################
def tau(args, features, gpus):
#Now only support Cosine and Euclidean on GPU
if args.distance_metric:
metrics = [args.distance_metric]
else:
metrics =['cosine','euclidean']
print("The Distance Metric is: ", metrics)
#CUDA parallel distance computing, support multi-gpus
def gpu_pairwise_distance(chunks, step_i, gpu):
#lock.acquire()#no need to lock threads in this case
cuda.select_device(gpu)
for i, chunk1 in enumerate(chunks):
print("Computing distance chunk: ", i + 1)
#Define chunk id x axis
step_j = 0
n_i = chunk1.shape[0]
for j, chunk2 in enumerate(chunks):
#Copy data to gpu
X_global = cuda.to_device(chunk1)
Y_global = cuda.to_device(chunk2)
#Define chunk id y axis
n_j = chunk2.shape[0]
out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0]))
# Define gpu's grid
threadsperblock = (16, 16)
blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0]))
blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1]))
blockspergrid = (blockspergrid_x, blockspergrid_y)
#Compute distance on gpu
if metric == "cosine":
cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
elif metric == "euclidean":
euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
#Find mean and max for each loop
mean_distances.append(np.mean(out_global.copy_to_host()))
max_dis.append(np.max(out_global.copy_to_host()))
#Select 2% points to EVT randomly
k = int(len(out_global.copy_to_host()) * 0.02)
number_of_rows = out_global.copy_to_host().shape[0]
random_indices = np.random.choice(number_of_rows, size=k, replace=False)
#Copy gpu distance data to cpu numpy
if len(out_global.copy_to_host()[random_indices, :]) > 0:
whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten())
#update chunk id
step_j += n_j
step_i += n_i
del X_global, Y_global, out_global
for metric in metrics:
distances = []
mean_distances = []
max_dis = []
#Split cpu's data to gpus
n = int(len(features) / len(gpus))
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
threads = []
from split import split_double
#Compute chunks in multi-gpus
for p, gpu in enumerate(gpus):
whole_distances = []
split = split_double(args, mutilple_features[p])
n = int(len(mutilple_features[p]) / split)
chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )]
step_i = 0
threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),]))
#Number of threads depend on how many gpus you have
for t in threads:
t.setDaemon(True)
t.start()
#Re-group final distance data from gpus
for t in threads:
whole_distances = []
t.join()
distances.extend(np.array(whole_distances).flatten())
#Process data
random_distances = np.array(distances).flatten()
random_distances = random_distances.reshape((random_distances.shape[0], 1)).T
mean_distances = np.mean(mean_distances)
print("mean_distances: ",mean_distances)
print("max dis:", max(max_dis))#original max dis before EVT
###################################################################
########################################################################################
print("Finding Nearest Points......")
#Find nearest points on GPUs
from gpu_functions import gpu_nearest
nearest_cluster = np.zeros((len(features)), dtype = 'int')
nearest_points_dis = np.zeros((len(features)))
n = int(len(features) / len(gpus))
features = list(features)
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_features) > len(gpus):
mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)])
del mutilple_features[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_features[0:len(gpus)]):
ind.append(range(step, len(j)+step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis]))
thread(threads)
del mutilple_features
# In round 1 the centroids are just the points themselves, regardless of linkage
nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))]
nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first
nearest_points = nearest_cluster
########################################################################################
print("Computing the appearance of nearest_points")
threadsperblock = 32
blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock)
X_global = cuda.to_device(nearest_points)
out_global = cuda.device_array((nearest_points.shape[0]))
from cuda_kernels import count_appear
count_appear[blockspergrid, threadsperblock](X_global, out_global)
appear = np.array(out_global.copy_to_host(), dtype = int)
appear_count = [[j, i] for i, j in enumerate(appear)]
# count the appearance of each kernel points
# generate order
order = [i[1] for i in sorted(appear_count, reverse=True)]
# add non kernel points to order
processed = set()
init = []
for count, i in enumerate(order):
j = nearest_points[i]
if i not in processed and j not in processed:
init.append([i, j])
processed.add(i)
processed.add(j)
init = init[0: int(len(init))]
N = len(init)
init_length = N
init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups.
######################################################################################################
print("Finding Nearest Intial Pairs")
#Computing nearest centroids on GPUs
centroids = [np.mean(i,axis=0) for i in init_features]
X = centroids
from gpu_functions import gpu_nearest_init_centroids
gs = np.zeros((len(init_features)))
nearest_init = np.zeros((len(init_features)), dtype = 'int')
n = int(len(centroids) / len(gpus))
mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_centroids) > len(gpus):
mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)])
del mutilple_centroids[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_centroids[0:len(gpus)]):
ind.append(range(step, len(j) + step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init]))
thread(threads)
del mutilple_centroids
##########################################################################################################
#Nearest initial pairs combo
nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)]
##########################################################################################################
gxs = []
print("Computing Gaps")
# Computing gaps on GPUs
from gpu_functions import gpu_distance
for pair1, pair2 in nearest_init_combo:
round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair2[0], pair2[1]]])
features0 = [features[k] for k in pair1] #extract features of cluster0
features1 = [features[k] for k in pair2] #extract features of cluster1
centroid0 = np.mean(features0, axis=0) # Get centroid of initial pair0
centroid1 = np.mean(features1, axis=0) # Get centroid of initial pair1
if metric == "cosine":
gx = scipy.spatial.distance.cosine(centroid0, centroid1)
elif metric == "euclidean":
gx = scipy.spatial.distance.euclidean(centroid0, centroid1)
gxs.append(gx) #gaps
#Our tau
number_of_clusters = 30
thresh = 0.01
tw = weibull.weibull()
data = torch.Tensor(gxs)
fullrange = torch.linspace(0, 1, 100)
tailj = torch.linspace(.45, .55, 10)
torch.Tensor.ndim = property(lambda self: len(self.shape))
tw.FitHigh(data.view(1, -1), int(1. * len(data)))
parms = tw.return_all_parameters()
print(parms)
pcent = 1 - 1 / len(data)
pcent = 0.99
print("EVT Tau for ", pcent * 100, " Percentile at ",
parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor'])
# wscoresj = tw.wscore(tailj)
# print("Ijbb Wscores=",tailj,wscoresj)
wscoresj = tw.wscore(fullrange)
tau_T = parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor']
tau_T = tau_T.numpy()[0][0]
return 0, tau_T, tau_T, nearest_points, metric, init_length, nearest_cluster_with_distance_round_1, nearest_points_dis, gx, 0 # tau_T is the EVT-derived threshold
def nan_to_num(t,mynan=0.):
if torch.all(torch.isfinite(t)):
return t
if len(t.size()) == 0:
return torch.tensor(mynan)
return torch.cat([nan_to_num(l).unsqueeze(0) for l in t],0)
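# Example (added): nan_to_num recursively replaces non-finite entries, so
# nan_to_num(torch.tensor([float('nan'), 1.0])) evaluates to tensor([0., 1.]).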
def get_tau(data,maxval,tailfrac=.25,pcent=.999):
#tw = weibull.weibull(translateAmountTensor=.001)
tw = weibull.weibull()
nbin=200
nscale = 10
#fullrange = torch.linspace(0,torch.max(ijbbdata),nbin)
fullrange = torch.linspace(0,maxval,nbin)
torch.Tensor.ndim = property(lambda self: len(self.shape))
#print( name , "Data mean, max", torch.mean(ijbbdata),torch.max(ijbbdata))
imean = torch.mean(data)
istd = torch.std(data)
imax = torch.max(data)
tw.FitHighTrimmed(data.view(1,-1),int(tailfrac*len(data)))
parms = tw.return_all_parameters()
wscoresj = tw.wscore(fullrange)
probj = nan_to_num(tw.prob(fullrange))
if(torch.sum(probj) > .001):
probj = probj/torch.sum(probj)
tau= parms['Scale']*np.power(-np.log((1-pcent)),(1/parms['Shape'])) - parms['translateAmountTensor'] + parms['smallScoreTensor']
return tau.numpy()
def thread(threads):
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
def takeSecond(elem):
return elem[1]
| i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = cosine_gpu(u, v) | identifier_body |
tau0305.py | from numba import jit, njit,prange
from numba import cuda, int32, complex128, float64, int64
import numpy as np
import threading
import math
import random
import torch
import weibull
import itertools
from scipy.spatial import distance as compute_distance
from scipy.spatial.distance import squareform
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KernelDensity
#####################################################################################
#Customize CUDA Kernels
@cuda.jit(device = True)
def cosine_gpu(u, v):
dot_product = 0
norm_v = 0
norm_u = 0
for m, n in zip(u, v):
dot_product += m * n
for m, n in zip(u, u):
norm_u += m * n
for m, n in zip(v, v):
norm_v += m * n
return 1.0 - dot_product / (math.sqrt(norm_u) * math.sqrt(norm_v))
@cuda.jit(device = True)
def euclidean_gpu(u, v):
norm = 0
for m, n in zip(u, v):
norm += (m - n) * (m - n)
norm = math.sqrt(norm)
return norm
@cuda.jit
def cosine_dis_gpu(X, Y, out):
i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = cosine_gpu(u, v)
@cuda.jit
def euclidean_dis_gpu(X, Y, out):
i, j = cuda.grid(2)
if i < out.shape[0] and j < out.shape[1]:
u = X[i]
v = Y[j]
out[i, j] = euclidean_gpu(u, v)
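# Note (added): each CUDA thread fills exactly one (i, j) cell of the distance
# matrix; the bounds check guards the partially filled blocks at the edges of
# the launch grid.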
#####################################################################################
def tau(args, features, gpus):
#Now only support Cosine and Euclidean on GPU
if args.distance_metric:
metrics = [args.distance_metric]
else:
metrics =['cosine','euclidean']
print("The Distance Metric is: ", metrics)
#CUDA parallel distance computing, support multi-gpus
def gpu_pairwise_distance(chunks, step_i, gpu):
#lock.acquire()#no need to lock threads in this case
cuda.select_device(gpu)
for i, chunk1 in enumerate(chunks):
print("Computing distance chunk: ", i + 1)
#Define chunk id x axis
step_j = 0
n_i = chunk1.shape[0]
for j, chunk2 in enumerate(chunks):
#Copy data to gpu
X_global = cuda.to_device(chunk1)
Y_global = cuda.to_device(chunk2)
#Define chunk id y axis
n_j = chunk2.shape[0]
out_global = cuda.device_array((chunk1.shape[0], chunk2.shape[0]))
# Define gpu's grid
threadsperblock = (16, 16)
blockspergrid_x = int(math.ceil(out_global.shape[0] / threadsperblock[0]))
blockspergrid_y = int(math.ceil(out_global.shape[1] / threadsperblock[1]))
blockspergrid = (blockspergrid_x, blockspergrid_y)
#Compute distance on gpu
if metric == "cosine":
cosine_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
elif metric == "euclidean":
euclidean_dis_gpu[blockspergrid, threadsperblock](X_global, Y_global, out_global)
#Find mean and max for each loop
mean_distances.append(np.mean(out_global.copy_to_host()))
max_dis.append(np.max(out_global.copy_to_host()))
#Select 2% points to EVT randomly
k = int(len(out_global.copy_to_host()) * 0.02)
number_of_rows = out_global.copy_to_host().shape[0]
random_indices = np.random.choice(number_of_rows, size=k, replace=False)
#Copy gpu distance data to cpu numpy
if len(out_global.copy_to_host()[random_indices, :]) > 0:
whole_distances.extend(out_global.copy_to_host()[random_indices, :].flatten())
#update chunk id
step_j += n_j
step_i += n_i
del X_global, Y_global, out_global
for metric in metrics:
distances = []
mean_distances = []
max_dis = []
#Split cpu's data to gpus
n = int(len(features) / len(gpus))
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
threads = []
from split import split_double
#Compute chunks in multi-gpus
for p, gpu in enumerate(gpus):
|
#Number of threads depend on how many gpus you have
for t in threads:
t.setDaemon(True)
t.start()
#Re-group final distance data from gpus
for t in threads:
whole_distances = []
t.join()
distances.extend(np.array(whole_distances).flatten())
#Process data
random_distances = np.array(distances).flatten()
random_distances = random_distances.reshape((random_distances.shape[0], 1)).T
mean_distances = np.mean(mean_distances)
print("mean_distances: ",mean_distances)
print("max dis:", max(max_dis))#original max dis before EVT
###################################################################
########################################################################################
print("Finding Nearest Points......")
#Find nearest points on GPUs
from gpu_functions import gpu_nearest
nearest_cluster = np.zeros((len(features)), dtype = 'int')
nearest_points_dis = np.zeros((len(features)))
n = int(len(features) / len(gpus))
features = list(features)
mutilple_features = [features[i * n:(i + 1) * n] for i in range((len(features) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_features) > len(gpus):
mutilple_features[len(gpus) - 1].extend(mutilple_features[len(gpus)])
del mutilple_features[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_features[0:len(gpus)]):
ind.append(range(step, len(j)+step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest, args=[mutilple_features[p], features, int(gpu), ind[p], steps[p], metric, nearest_cluster, nearest_points_dis]))
thread(threads)
del mutilple_features
# In round 1 the centroids are just the points themselves, regardless of linkage
nearest_cluster_with_distance_round_1 = [[j, [k, i]] for k, (i, j) in enumerate(zip(nearest_cluster, nearest_points_dis))]
nearest_cluster_with_distance_round_1 = sorted(nearest_cluster_with_distance_round_1) # Sort by distance, process the smallest one first
nearest_points = nearest_cluster
########################################################################################
print("Computing the appearance of nearest_points")
threadsperblock = 32
blockspergrid = math.ceil(nearest_points.shape[0] / threadsperblock)
X_global = cuda.to_device(nearest_points)
out_global = cuda.device_array((nearest_points.shape[0]))
from cuda_kernels import count_appear
count_appear[blockspergrid, threadsperblock](X_global, out_global)
appear = np.array(out_global.copy_to_host(), dtype = int)
appear_count = [[j, i] for i, j in enumerate(appear)]
# count the appearance of each kernel points
# generate order
order = [i[1] for i in sorted(appear_count, reverse=True)]
# add non kernel points to order
processed = set()
init = []
for count, i in enumerate(order):
j = nearest_points[i]
if i not in processed and j not in processed:
init.append([i, j])
processed.add(i)
processed.add(j)
init = init[0: int(len(init))]
N = len(init)
init_length = N
init_features = [[features[i[0]], features[i[1]]] for i in init] #features of initial groups.
######################################################################################################
print("Finding Nearest Intial Pairs")
#Computing nearest centroids on GPUs
centroids = [np.mean(i,axis=0) for i in init_features]
X = centroids
from gpu_functions import gpu_nearest_init_centroids
gs = np.zeros((len(init_features)))
nearest_init = np.zeros((len(init_features)), dtype = 'int')
n = int(len(centroids) / len(gpus))
mutilple_centroids = [centroids[i * n:(i + 1) * n] for i in range((len(centroids) + n - 1) // n )]
if len(gpus) > 1:
if len(mutilple_centroids) > len(gpus):
mutilple_centroids[len(gpus) - 1].extend(mutilple_centroids[len(gpus)])
del mutilple_centroids[len(gpus)]
ind = []
step = 0
steps = []
for i, j in enumerate(mutilple_centroids[0:len(gpus)]):
ind.append(range(step, len(j) + step))
steps.append(step)
step += len(j)
threads = []
for p, gpu in enumerate(gpus):
threads.append(threading.Thread(target=gpu_nearest_init_centroids, args=[mutilple_centroids[p], X, int(gpu), ind[p], metric, gs, nearest_init]))
thread(threads)
del mutilple_centroids
##########################################################################################################
#Nearest initial pairs combo
nearest_init_combo = [[m, init[n]] for m, n in zip(init, nearest_init)]
##########################################################################################################
gxs = []
print("Computing Gaps")
# Computing gaps on GPUs
from gpu_functions import gpu_distance
for pair1, pair2 in nearest_init_combo:
round_features = np.array([features[k] for k in [pair1[0], pair1[1], pair2[0], pair2[1]]])
features0 = [features[k] for k in pair1] #extract features of cluster0
features1 = [features[k] for k in pair2] #extract features of cluster1
centroid0 = np.mean(features0, axis=0) # Get centroid of initial pair0
centroid1 = np.mean(features1, axis=0) # Get centroid of initial pair1
if metric == "cosine":
gx = scipy.spatial.distance.cosine(centroid0, centroid1)
elif metric == "euclidean":
gx = scipy.spatial.distance.euclidean(centroid0, centroid1)
gxs.append(gx) #gaps
#Our tau
number_of_clusters = 30
thresh = 0.01
tw = weibull.weibull()
data = torch.Tensor(gxs)
fullrange = torch.linspace(0, 1, 100)
tailj = torch.linspace(.45, .55, 10)
torch.Tensor.ndim = property(lambda self: len(self.shape))
tw.FitHigh(data.view(1, -1), int(1. * len(data)))
parms = tw.return_all_parameters()
print(parms)
pcent = 1 - 1 / len(data)
pcent = 0.99
print("EVT Tau for ", pcent * 100, " Percentile at ",
parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor'])
# wscoresj = tw.wscore(tailj)
# print("Ijbb Wscores=",tailj,wscoresj)
wscoresj = tw.wscore(fullrange)
tau_T = parms['Scale'] * np.power(np.log(1 / (1 - pcent)), (1 / parms['Shape'])) - 1 + parms['smallScoreTensor']
tau_T = tau_T.numpy()[0][0]
return 0, T, tau, nearest_points, metric, init_length, nearest_cluster_with_distance_round_1, nearest_points_dis, gx, 0
def nan_to_num(t,mynan=0.):
if torch.all(torch.isfinite(t)):
return t
if len(t.size()) == 0:
return torch.tensor(mynan)
return torch.cat([nan_to_num(l).unsqueeze(0) for l in t],0)
def get_tau(data,maxval,tailfrac=.25,pcent=.999):
#tw = weibull.weibull(translateAmountTensor=.001)
tw = weibull.weibull()
nbin=200
nscale = 10
#fullrange = torch.linspace(0,torch.max(ijbbdata),nbin)
fullrange = torch.linspace(0,maxval,nbin)
torch.Tensor.ndim = property(lambda self: len(self.shape))
#print( name , "Data mean, max", torch.mean(ijbbdata),torch.max(ijbbdata))
imean = torch.mean(data)
istd = torch.std(data)
imax = torch.max(data)
tw.FitHighTrimmed(data.view(1,-1),int(tailfrac*len(data)))
parms = tw.return_all_parameters()
wscoresj = tw.wscore(fullrange)
probj = nan_to_num(tw.prob(fullrange))
if(torch.sum(probj) > .001):
probj = probj/torch.sum(probj)
tau= parms['Scale']*np.power(-np.log((1-pcent)),(1/parms['Shape'])) - parms['translateAmountTensor'] + parms['smallScoreTensor']
return tau.numpy()
def thread(threads):
for t in threads:
t.setDaemon(True)
t.start()
for t in threads:
t.join()
def takeSecond(elem):
return elem[1]
| whole_distances = []
split = split_double(args, mutilple_features[p])
n = int(len(mutilple_features[p]) / split)
chunks = [mutilple_features[p][i * n:(i + 1) * n] for i in range((len(mutilple_features[p]) + n - 1) // n )]
step_i = 0
threads.append(threading.Thread(target=gpu_pairwise_distance, args=[chunks, step_i, int(gpu),])) | conditional_block |
verifier.rs | use argon2::{defaults, Argon2, ParamErr, Variant, Version};
use std::error::Error;
/// The main export here is `Encoded`. See `examples/verify.rs` for usage
/// examples.
use std::{fmt, str};
macro_rules! maybe {
($e: expr) => {
match $e {
None => return None,
Some(v) => v,
}
};
}
const LUT64: &'static [u8; 64] =
b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
fn lut(n: u8) -> u8 {
LUT64[n as usize & 0x3f]
}
fn delut(c: u8) -> Option<u8> {
match c {
43 => Some(62),
47 => Some(63),
_ if 65 <= c && c <= 90 => Some(c - 65),
_ if 97 <= c && c <= 122 => Some(c - 71),
_ if 48 <= c && c <= 57 => Some(c + 4),
_ => None,
}
}
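// Note (added): `lut`/`delut` implement the standard base64 alphabet without a
// decode table: 'A'-'Z' (65..=90) map to 0..=25, 'a'-'z' (97..=122) to 26..=51,
// '0'-'9' (48..=57) to 52..=61, '+' to 62 and '/' to 63.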
fn quad(n: &[u8]) -> [u8; 4] {
assert_eq!(n.len(), 3);
let (b, c) = (n[1] >> 4 | n[0] << 4, n[2] >> 6 | n[1] << 2);
[lut(n[0] >> 2), lut(b), lut(c), lut(n[2])]
}
fn triplet(n: &[u8]) -> Option<[u8; 3]> {
assert_eq!(n.len(), 4);
let a = maybe!(delut(n[0]));
let b = maybe!(delut(n[1]));
let c = maybe!(delut(n[2]));
let d = maybe!(delut(n[3]));
Some([a << 2 | b >> 4, b << 4 | c >> 2, c << 6 | d])
}
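// Worked example (added): `quad` packs 3 bytes into 4 base64 symbols and
// `triplet` reverses it, e.g. b"Man" <-> "TWFu"; the unpadded encode/decode
// below simply drop the trailing '=' padding of standard base64.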
fn base64_no_pad(bytes: &[u8]) -> Vec<u8> {
let mut rv = vec![];
let mut pos = 0;
while pos + 3 <= bytes.len() {
rv.extend_from_slice(&quad(&bytes[pos..pos + 3]));
pos += 3;
}
if bytes.len() - pos == 1 {
rv.push(lut(bytes[pos] >> 2));
rv.push(lut((bytes[pos] & 0x03) << 4));
} else if bytes.len() - pos == 2 {
rv.extend_from_slice(&quad(&[bytes[pos], bytes[pos + 1], 0]));
rv.pop();
} | if bytes.len() % 4 != 1 && bytes.len() > 0 {
let mut rv = vec![];
let mut pos = 0;
while pos + 4 <= bytes.len() {
let s = maybe!(triplet(&bytes[pos..pos + 4]));
rv.extend_from_slice(&s);
pos += 4;
}
if bytes.len() - pos == 2 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
rv.push(a << 2 | b >> 4);
} else if bytes.len() - pos == 3 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
let c = maybe!(delut(bytes[pos + 2]));
rv.push(a << 2 | b >> 4);
rv.push(b << 4 | c >> 2);
}
Some(rv)
} else {
None
}
}
struct Parser<'a> {
enc: &'a [u8],
pos: usize,
}
impl<'a> fmt::Debug for Parser<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[..self.pos]))?;
write!(f, "<-- {} -->", self.pos)?;
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[self.pos..]))?;
Ok(())
}
}
type Parsed<T> = Result<T, usize>;
impl<'a> Parser<'a> {
fn expect(&mut self, exp: &[u8]) -> Parsed<()> {
assert!(self.pos < self.enc.len());
if self.enc.len() - self.pos < exp.len() || &self.enc[self.pos..self.pos + exp.len()] != exp
{
self.err()
} else {
self.pos += exp.len();
Ok(())
}
}
fn read_until(&mut self, stopchar: u8) -> &'a [u8] {
let start = self.pos;
let stop = |c: &u8| *c == stopchar;
self.pos = match self.enc[self.pos..].iter().position(stop) {
None => self.enc.len() - 1,
Some(end) => self.pos + end,
};
&self.enc[start..self.pos]
}
fn read_u32(&mut self) -> Parsed<u32> {
let is_digit = |c: u8| 48 <= c && c <= 57;
let mut end = self.pos;
while end < self.enc.len() && is_digit(self.enc[end]) {
end += 1;
}
match str::from_utf8(&self.enc[self.pos..end]) {
Err(_) => self.err(),
Ok(s) => match s.parse() {
Err(_) => self.err(),
Ok(n) => {
self.pos = end;
Ok(n)
}
},
}
}
fn read_version(&mut self) -> Parsed<Version> {
self.read_u32().and_then(|vers| match vers {
0x10 => Ok(Version::_0x10),
0x13 => Ok(Version::_0x13),
_ => self.err(),
})
}
fn decode64_till_one_of(&mut self, char_set: &[u8]) -> Parsed<Vec<u8>> {
let end = self.enc[self.pos..]
.iter()
.position(|c| char_set.contains(c))
.map(|sub_pos| self.pos + sub_pos)
.unwrap_or_else(|| self.enc.len());
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn decode64_till(&mut self, stopchar: Option<u8>) -> Parsed<Vec<u8>> {
let end = match stopchar {
None => self.enc.len(),
Some(c) => {
self.enc[self.pos..]
.iter()
.take_while(|k| **k != c)
.fold(0, |c, _| c + 1)
+ self.pos
}
};
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn err<T>(&self) -> Parsed<T> {
Err(self.pos)
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum DecodeError {
/// Byte position of first parse error
ParseError(usize),
/// Invalid Argon2 parameters given in encoding
InvalidParams(ParamErr),
}
impl fmt::Display for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DecodeError::*;
match *self {
ParseError(pos) => write!(f, "Parse error at position {}", pos),
InvalidParams(ref perr) => {
write!(f, "Invalid hash parameters given by encoded: {}", perr)
}
}
}
}
impl Error for DecodeError {
fn description(&self) -> &str {
match *self {
DecodeError::ParseError(_) => "Hash string parse error.",
DecodeError::InvalidParams(ref perr) => perr.description(),
}
}
}
/// Represents a single Argon2 hashing session. A hash session comprises the
/// hash algorithm parameters, salt, key, and data used to hash a given input.
#[derive(Debug, Eq, PartialEq)]
pub struct Encoded {
params: Argon2,
hash: Vec<u8>,
salt: Vec<u8>,
key: Vec<u8>,
data: Vec<u8>,
}
type Packed = (
Variant,
Version,
u32,
u32,
u32,
Vec<u8>,
Vec<u8>,
Vec<u8>,
Vec<u8>,
);
impl Encoded {
fn parse(encoded: &[u8]) -> Result<Packed, usize> {
let mut p = Parser {
enc: encoded,
pos: 0,
};
p.expect(b"$argon2")?;
let variant = match p.read_until('$' as u8) {
b"d" => Variant::Argon2d,
b"i" => Variant::Argon2i,
b"id" => Variant::Argon2id,
x => return Err(p.pos - x.len()),
};
p.expect(b"$")?;
let vers = match p.expect(b"v=") {
// Match the c reference impl's behavior, which defaults to a v0x10
// hash encoding since the `v=` field was only introduced with
// v0x13.
Err(_) => Version::_0x10,
Ok(()) => {
let vers = p.read_version()?;
p.expect(b",")?;
vers
}
};
p.expect(b"m=")?;
let kib = p.read_u32()?;
p.expect(b",t=")?;
let passes = p.read_u32()?;
p.expect(b",p=")?;
let lanes = p.read_u32()?;
let key = match p.expect(b",keyid=") {
Err(_) => vec![],
Ok(()) => p.decode64_till_one_of(b",$")?,
};
let data = match p.expect(b",data=") {
Ok(()) => p.decode64_till(Some(b'$'))?,
Err(_) => vec![],
};
p.expect(b"$")?;
let salt = p.decode64_till(Some(b'$'))?;
p.expect(b"$")?;
let hash = p.decode64_till(None)?;
Ok((variant, vers, kib, passes, lanes, key, data, salt, hash))
}
/// Reconstruct a previous hash session from serialized bytes.
pub fn from_u8(encoded: &[u8]) -> Result<Self, DecodeError> {
match Self::parse(encoded) {
Err(pos) => Err(DecodeError::ParseError(pos)),
Ok((v, vers, kib, passes, lanes, key, data, salt, hash)) => {
match Argon2::with_version(passes, lanes, kib, v, vers) {
Err(e) => Err(DecodeError::InvalidParams(e)),
Ok(a2) => Ok(Encoded {
params: a2,
hash: hash,
salt: salt,
key: key,
data: data,
}),
}
}
}
}
/// Serialize this hashing session into raw bytes that can later be
/// recovered by `Encoded::from_u8`.
pub fn to_u8(&self) -> Vec<u8> {
let vcode = |v| match v {
Variant::Argon2i => "i",
Variant::Argon2d => "d",
Variant::Argon2id => "id",
};
let b64 = |x| String::from_utf8(base64_no_pad(x)).unwrap();
let k_ = match &b64(&self.key[..]) {
bytes if bytes.len() > 0 => format!(",keyid={}", bytes),
_ => String::new(),
};
let x_ = match &b64(&self.data[..]) {
bytes if bytes.len() > 0 => format!(",data={}", bytes),
_ => String::new(),
};
let (var, m, t, p, vers) = self.params();
format!(
"$argon2{}$v={},m={},t={},p={}{}{}${}${}",
vcode(var),
vers as usize,
m,
t,
p,
k_,
x_,
b64(&self.salt[..]),
b64(&self.hash)
)
.into_bytes()
}
/// Generates a new hashing session from password, salt, and other byte
/// input. Parameters are:
///
/// `argon`: An `Argon2` struct representative of the desired hash algorithm
/// parameters.
///
/// `p`: Password input.
///
/// `s`: Salt.
///
/// `k`: An optional secret value.
///
/// `x`: Optional, miscellaneous associated data.
///
/// Note that `p, s, k, x` must conform to the same length constraints
/// dictated by `Argon2::hash`.
pub fn new(argon: Argon2, p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
let mut out = vec![0 as u8; defaults::LENGTH];
argon.hash(&mut out[..], p, s, k, x);
Encoded {
params: argon,
hash: out,
salt: s.iter().cloned().collect(),
key: k.iter().cloned().collect(),
data: x.iter().cloned().collect(),
}
}
/// Same as `Encoded::new`, but with the default Argon2i hash algorithm
/// parameters.
pub fn default2i(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2i), p, s, k, x)
}
/// Same as `Encoded::new`, but with the default _Argon2d_ hash algorithm
/// parameters.
pub fn default2d(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2d), p, s, k, x)
}
/// Verifies password input against the hash that was previously created in
/// this hashing session.
pub fn verify(&self, p: &[u8]) -> bool {
let mut out = [0 as u8; defaults::LENGTH];
let s = &self.salt[..];
self.params
.hash(&mut out, p, s, &self.key[..], &self.data[..]);
constant_eq(&out, &self.hash)
}
/// Provides read-only access to the Argon2 parameters of this hash.
pub fn params(&self) -> (Variant, u32, u32, u32, Version) {
self.params.params()
}
}
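// Illustrative sketch (not part of the original crate): a typical round-trip with
// `Encoded` looks roughly like the following, assuming the salt and password meet
// the length constraints of `Argon2::hash`:
//
//     let session = Encoded::default2i(b"password", b"sixteen byte salt", b"", &[]);
//     let stored = session.to_u8(); // persist this encoded string
//     let recovered = Encoded::from_u8(&stored).unwrap();
//     assert!(recovered.verify(b"password"));
//     assert!(!recovered.verify(b"wrong password"));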
/// Compares two byte slices for equality without short-circuiting on the first
/// differing byte. Returns `false` if the lengths differ.
#[inline(never)]
pub fn constant_eq(xs: &[u8], ys: &[u8]) -> bool {
if xs.len() != ys.len() {
false
} else {
let rv = xs.iter().zip(ys.iter()).fold(0, |rv, (x, y)| rv | (x ^ y));
// this kills the optimizer.
(1 & (rv as u32).wrapping_sub(1) >> 8).wrapping_sub(1) == 0
}
}
#[cfg(test)]
mod test {
use super::{base64_no_pad, debase64_no_pad, Encoded};
const BASE64_CASES: [(&'static [u8], &'static [u8]); 5] = [
(b"any carnal pleasure.", b"YW55IGNhcm5hbCBwbGVhc3VyZS4"),
(b"any carnal pleasure", b"YW55IGNhcm5hbCBwbGVhc3VyZQ"),
(b"any carnal pleasur", b"YW55IGNhcm5hbCBwbGVhc3Vy"),
(b"any carnal pleasu", b"YW55IGNhcm5hbCBwbGVhc3U"),
(b"any carnal pleas", b"YW55IGNhcm5hbCBwbGVhcw"),
];
const ENCODED: &'static [&'static [u8]] = &[
b"$argon2i$m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
// ^ ensures that default version is 0x10.
b"$argon2i$v=16,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
b"$argon2i$v=19,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$AvsXI+N78kGHzeGwzz0VTjfBdl7MmgvBGfJ/XXyqLbA",
];
#[test]
fn test_base64_no_pad() {
for &(s, exp) in BASE64_CASES.iter() {
assert_eq!(&base64_no_pad(s)[..], exp);
}
}
#[test]
fn test_debase64_no_pad() {
for &(exp, s) in BASE64_CASES.iter() {
assert_eq!(debase64_no_pad(s).unwrap(), exp);
}
}
#[test]
fn test_verify() {
for &hash_string in ENCODED {
let v = Encoded::from_u8(hash_string).unwrap();
assert_eq!(v.verify(b"argon2i!"), true);
assert_eq!(v.verify(b"nope"), false);
}
}
#[test]
fn encode_decode() {
for &(s, _) in BASE64_CASES.iter() {
let salt = b"Yum! Extra salty";
let key = b"ff5dfa4d7a048f9db4ad0caad82e75c";
let enc = Encoded::default2i(s, salt, key, &[]);
assert_eq!(Encoded::from_u8(&enc.to_u8()), Ok(enc));
}
}
#[test]
fn bad_encoded() {
use super::DecodeError::*;
use argon2::ParamErr::*;
let cases: &[(&'static [u8], super::DecodeError)] = &[
(b"$argon2y$v=19,m=4096", ParseError(7)),
(
b"$argon2i$v=19,m=-2,t=-4,p=-4$aaaaaaaa$ffffff",
ParseError(16),
),
// ^ negative m is invalid.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff*",
ParseError(35),
),
// ^ asterisk is invalid base64 char.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff",
InvalidParams(TooFewPasses),
),
// ^ p = 0 is invalid.
(b"$argon2i$m", ParseError(9)),
];
// ^ intentionally fail Encoded::expect with undersized input
for &(case, err) in cases.iter() {
let v = Encoded::from_u8(case);
assert!(v.is_err());
assert_eq!(v.err().unwrap(), err);
}
}
} | rv
}
fn debase64_no_pad(bytes: &[u8]) -> Option<Vec<u8>> { | random_line_split |
verifier.rs | use argon2::{defaults, Argon2, ParamErr, Variant, Version};
use std::error::Error;
/// The main export here is `Encoded`. See `examples/verify.rs` for usage
/// examples.
use std::{fmt, str};
macro_rules! maybe {
($e: expr) => {
match $e {
None => return None,
Some(v) => v,
}
};
}
const LUT64: &'static [u8; 64] =
b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
fn lut(n: u8) -> u8 {
LUT64[n as usize & 0x3f]
}
fn delut(c: u8) -> Option<u8> {
match c {
43 => Some(62),
47 => Some(63),
_ if 65 <= c && c <= 90 => Some(c - 65),
_ if 97 <= c && c <= 122 => Some(c - 71),
_ if 48 <= c && c <= 57 => Some(c + 4),
_ => None,
}
}
fn quad(n: &[u8]) -> [u8; 4] {
assert_eq!(n.len(), 3);
let (b, c) = (n[1] >> 4 | n[0] << 4, n[2] >> 6 | n[1] << 2);
[lut(n[0] >> 2), lut(b), lut(c), lut(n[2])]
}
fn triplet(n: &[u8]) -> Option<[u8; 3]> {
assert_eq!(n.len(), 4);
let a = maybe!(delut(n[0]));
let b = maybe!(delut(n[1]));
let c = maybe!(delut(n[2]));
let d = maybe!(delut(n[3]));
Some([a << 2 | b >> 4, b << 4 | c >> 2, c << 6 | d])
}
fn base64_no_pad(bytes: &[u8]) -> Vec<u8> {
let mut rv = vec![];
let mut pos = 0;
while pos + 3 <= bytes.len() {
rv.extend_from_slice(&quad(&bytes[pos..pos + 3]));
pos += 3;
}
if bytes.len() - pos == 1 {
rv.push(lut(bytes[pos] >> 2));
rv.push(lut((bytes[pos] & 0x03) << 4));
} else if bytes.len() - pos == 2 {
rv.extend_from_slice(&quad(&[bytes[pos], bytes[pos + 1], 0]));
rv.pop();
}
rv
}
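// Illustrative sketch (not in the original source): the unpadded encoding produced
// here matches the vectors exercised by the test module in this file, e.g.
//
//     assert_eq!(&base64_no_pad(b"any carnal pleas")[..], b"YW55IGNhcm5hbCBwbGVhcw");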
fn debase64_no_pad(bytes: &[u8]) -> Option<Vec<u8>> |
struct Parser<'a> {
enc: &'a [u8],
pos: usize,
}
impl<'a> fmt::Debug for Parser<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[..self.pos]))?;
write!(f, "<-- {} -->", self.pos)?;
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[self.pos..]))?;
Ok(())
}
}
type Parsed<T> = Result<T, usize>;
impl<'a> Parser<'a> {
fn expect(&mut self, exp: &[u8]) -> Parsed<()> {
assert!(self.pos < self.enc.len());
if self.enc.len() - self.pos < exp.len() || &self.enc[self.pos..self.pos + exp.len()] != exp
{
self.err()
} else {
self.pos += exp.len();
Ok(())
}
}
fn read_until(&mut self, stopchar: u8) -> &'a [u8] {
let start = self.pos;
let stop = |c: &u8| *c == stopchar;
self.pos = match self.enc[self.pos..].iter().position(stop) {
None => self.enc.len() - 1,
Some(end) => self.pos + end,
};
&self.enc[start..self.pos]
}
fn read_u32(&mut self) -> Parsed<u32> {
let is_digit = |c: u8| 48 <= c && c <= 57;
let mut end = self.pos;
while end < self.enc.len() && is_digit(self.enc[end]) {
end += 1;
}
match str::from_utf8(&self.enc[self.pos..end]) {
Err(_) => self.err(),
Ok(s) => match s.parse() {
Err(_) => self.err(),
Ok(n) => {
self.pos = end;
Ok(n)
}
},
}
}
fn read_version(&mut self) -> Parsed<Version> {
self.read_u32().and_then(|vers| match vers {
0x10 => Ok(Version::_0x10),
0x13 => Ok(Version::_0x13),
_ => self.err(),
})
}
fn decode64_till_one_of(&mut self, char_set: &[u8]) -> Parsed<Vec<u8>> {
let end = self.enc[self.pos..]
.iter()
.position(|c| char_set.contains(c))
.map(|sub_pos| self.pos + sub_pos)
.unwrap_or_else(|| self.enc.len());
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn decode64_till(&mut self, stopchar: Option<u8>) -> Parsed<Vec<u8>> {
let end = match stopchar {
None => self.enc.len(),
Some(c) => {
self.enc[self.pos..]
.iter()
.take_while(|k| **k != c)
.fold(0, |c, _| c + 1)
+ self.pos
}
};
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn err<T>(&self) -> Parsed<T> {
Err(self.pos)
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum DecodeError {
/// Byte position of first parse error
ParseError(usize),
/// Invalid Argon2 parameters given in encoding
InvalidParams(ParamErr),
}
impl fmt::Display for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DecodeError::*;
match *self {
ParseError(pos) => write!(f, "Parse error at position {}", pos),
InvalidParams(ref perr) => {
write!(f, "Invalid hash parameters given by encoded: {}", perr)
}
}
}
}
impl Error for DecodeError {
fn description(&self) -> &str {
match *self {
DecodeError::ParseError(_) => "Hash string parse error.",
DecodeError::InvalidParams(ref perr) => perr.description(),
}
}
}
/// Represents a single Argon2 hashing session. A hash session comprises the
/// hash algorithm parameters, salt, key, and data used to hash a given input.
#[derive(Debug, Eq, PartialEq)]
pub struct Encoded {
params: Argon2,
hash: Vec<u8>,
salt: Vec<u8>,
key: Vec<u8>,
data: Vec<u8>,
}
type Packed = (
Variant,
Version,
u32,
u32,
u32,
Vec<u8>,
Vec<u8>,
Vec<u8>,
Vec<u8>,
);
impl Encoded {
fn parse(encoded: &[u8]) -> Result<Packed, usize> {
let mut p = Parser {
enc: encoded,
pos: 0,
};
p.expect(b"$argon2")?;
let variant = match p.read_until('$' as u8) {
b"d" => Variant::Argon2d,
b"i" => Variant::Argon2i,
b"id" => Variant::Argon2id,
x => return Err(p.pos - x.len()),
};
p.expect(b"$")?;
let vers = match p.expect(b"v=") {
// Match the c reference impl's behavior, which defaults to a v0x10
// hash encoding since the `v=` field was only introduced with
// v0x13.
Err(_) => Version::_0x10,
Ok(()) => {
let vers = p.read_version()?;
p.expect(b",")?;
vers
}
};
p.expect(b"m=")?;
let kib = p.read_u32()?;
p.expect(b",t=")?;
let passes = p.read_u32()?;
p.expect(b",p=")?;
let lanes = p.read_u32()?;
let key = match p.expect(b",keyid=") {
Err(_) => vec![],
Ok(()) => p.decode64_till_one_of(b",$")?,
};
let data = match p.expect(b",data=") {
Ok(()) => p.decode64_till(Some(b'$'))?,
Err(_) => vec![],
};
p.expect(b"$")?;
let salt = p.decode64_till(Some(b'$'))?;
p.expect(b"$")?;
let hash = p.decode64_till(None)?;
Ok((variant, vers, kib, passes, lanes, key, data, salt, hash))
}
/// Reconstruct a previous hash session from serialized bytes.
pub fn from_u8(encoded: &[u8]) -> Result<Self, DecodeError> {
match Self::parse(encoded) {
Err(pos) => Err(DecodeError::ParseError(pos)),
Ok((v, vers, kib, passes, lanes, key, data, salt, hash)) => {
match Argon2::with_version(passes, lanes, kib, v, vers) {
Err(e) => Err(DecodeError::InvalidParams(e)),
Ok(a2) => Ok(Encoded {
params: a2,
hash: hash,
salt: salt,
key: key,
data: data,
}),
}
}
}
}
/// Serialize this hashing session into raw bytes that can later be
/// recovered by `Encoded::from_u8`.
pub fn to_u8(&self) -> Vec<u8> {
let vcode = |v| match v {
Variant::Argon2i => "i",
Variant::Argon2d => "d",
Variant::Argon2id => "id",
};
let b64 = |x| String::from_utf8(base64_no_pad(x)).unwrap();
let k_ = match &b64(&self.key[..]) {
bytes if bytes.len() > 0 => format!(",keyid={}", bytes),
_ => String::new(),
};
let x_ = match &b64(&self.data[..]) {
bytes if bytes.len() > 0 => format!(",data={}", bytes),
_ => String::new(),
};
let (var, m, t, p, vers) = self.params();
format!(
"$argon2{}$v={},m={},t={},p={}{}{}${}${}",
vcode(var),
vers as usize,
m,
t,
p,
k_,
x_,
b64(&self.salt[..]),
b64(&self.hash)
)
.into_bytes()
}
/// Generates a new hashing session from password, salt, and other byte
/// input. Parameters are:
///
/// `argon`: An `Argon2` struct representative of the desired hash algorithm
/// parameters.
///
/// `p`: Password input.
///
/// `s`: Salt.
///
/// `k`: An optional secret value.
///
/// `x`: Optional, miscellaneous associated data.
///
/// Note that `p, s, k, x` must conform to the same length constraints
/// dictated by `Argon2::hash`.
pub fn new(argon: Argon2, p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
let mut out = vec![0 as u8; defaults::LENGTH];
argon.hash(&mut out[..], p, s, k, x);
Encoded {
params: argon,
hash: out,
salt: s.iter().cloned().collect(),
key: k.iter().cloned().collect(),
data: x.iter().cloned().collect(),
}
}
/// Same as `Encoded::new`, but with the default Argon2i hash algorithm
/// parameters.
pub fn default2i(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2i), p, s, k, x)
}
/// Same as `Encoded::new`, but with the default _Argon2d_ hash algorithm
/// parameters.
pub fn default2d(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2d), p, s, k, x)
}
/// Verifies password input against the hash that was previously created in
/// this hashing session.
pub fn verify(&self, p: &[u8]) -> bool {
let mut out = [0 as u8; defaults::LENGTH];
let s = &self.salt[..];
self.params
.hash(&mut out, p, s, &self.key[..], &self.data[..]);
constant_eq(&out, &self.hash)
}
/// Provides read-only access to the Argon2 parameters of this hash.
pub fn params(&self) -> (Variant, u32, u32, u32, Version) {
self.params.params()
}
}
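// Illustrative sketch (not part of the original crate): the intended flow is to build
// an `Encoded` once, persist `to_u8()`, and later recover it with `from_u8` to verify
// a password attempt, e.g.
//
//     let session = Encoded::default2i(b"password", b"sixteen byte salt", b"", &[]);
//     let stored = session.to_u8();
//     assert!(Encoded::from_u8(&stored).unwrap().verify(b"password"));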
/// Compares two byte slices for equality without short-circuiting on the first
/// differing byte. Returns `false` if the lengths differ.
#[inline(never)]
pub fn constant_eq(xs: &[u8], ys: &[u8]) -> bool {
if xs.len() != ys.len() {
false
} else {
let rv = xs.iter().zip(ys.iter()).fold(0, |rv, (x, y)| rv | (x ^ y));
// this kills the optimizer.
(1 & (rv as u32).wrapping_sub(1) >> 8).wrapping_sub(1) == 0
}
}
#[cfg(test)]
mod test {
use super::{base64_no_pad, debase64_no_pad, Encoded};
const BASE64_CASES: [(&'static [u8], &'static [u8]); 5] = [
(b"any carnal pleasure.", b"YW55IGNhcm5hbCBwbGVhc3VyZS4"),
(b"any carnal pleasure", b"YW55IGNhcm5hbCBwbGVhc3VyZQ"),
(b"any carnal pleasur", b"YW55IGNhcm5hbCBwbGVhc3Vy"),
(b"any carnal pleasu", b"YW55IGNhcm5hbCBwbGVhc3U"),
(b"any carnal pleas", b"YW55IGNhcm5hbCBwbGVhcw"),
];
const ENCODED: &'static [&'static [u8]] = &[
b"$argon2i$m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
// ^ ensures that default version is 0x10.
b"$argon2i$v=16,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
b"$argon2i$v=19,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$AvsXI+N78kGHzeGwzz0VTjfBdl7MmgvBGfJ/XXyqLbA",
];
#[test]
fn test_base64_no_pad() {
for &(s, exp) in BASE64_CASES.iter() {
assert_eq!(&base64_no_pad(s)[..], exp);
}
}
#[test]
fn test_debase64_no_pad() {
for &(exp, s) in BASE64_CASES.iter() {
assert_eq!(debase64_no_pad(s).unwrap(), exp);
}
}
#[test]
fn test_verify() {
for &hash_string in ENCODED {
let v = Encoded::from_u8(hash_string).unwrap();
assert_eq!(v.verify(b"argon2i!"), true);
assert_eq!(v.verify(b"nope"), false);
}
}
#[test]
fn encode_decode() {
for &(s, _) in BASE64_CASES.iter() {
let salt = b"Yum! Extra salty";
let key = b"ff5dfa4d7a048f9db4ad0caad82e75c";
let enc = Encoded::default2i(s, salt, key, &[]);
assert_eq!(Encoded::from_u8(&enc.to_u8()), Ok(enc));
}
}
#[test]
fn bad_encoded() {
use super::DecodeError::*;
use argon2::ParamErr::*;
let cases: &[(&'static [u8], super::DecodeError)] = &[
(b"$argon2y$v=19,m=4096", ParseError(7)),
(
b"$argon2i$v=19,m=-2,t=-4,p=-4$aaaaaaaa$ffffff",
ParseError(16),
),
// ^ negative m is invalid.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff*",
ParseError(35),
),
// ^ asterisk is invalid base64 char.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff",
InvalidParams(TooFewPasses),
),
// ^ p = 0 is invalid.
(b"$argon2i$m", ParseError(9)),
];
// ^ intentionally fail Encoded::expect with undersized input
for &(case, err) in cases.iter() {
let v = Encoded::from_u8(case);
assert!(v.is_err());
assert_eq!(v.err().unwrap(), err);
}
}
}
| {
if bytes.len() % 4 != 1 && bytes.len() > 0 {
let mut rv = vec![];
let mut pos = 0;
while pos + 4 <= bytes.len() {
let s = maybe!(triplet(&bytes[pos..pos + 4]));
rv.extend_from_slice(&s);
pos += 4;
}
if bytes.len() - pos == 2 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
rv.push(a << 2 | b >> 4);
} else if bytes.len() - pos == 3 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
let c = maybe!(delut(bytes[pos + 2]));
rv.push(a << 2 | b >> 4);
rv.push(b << 4 | c >> 2);
}
Some(rv)
} else {
None
}
} | identifier_body |
verifier.rs | use argon2::{defaults, Argon2, ParamErr, Variant, Version};
use std::error::Error;
/// The main export here is `Encoded`. See `examples/verify.rs` for usage
/// examples.
use std::{fmt, str};
macro_rules! maybe {
($e: expr) => {
match $e {
None => return None,
Some(v) => v,
}
};
}
const LUT64: &'static [u8; 64] =
b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
fn lut(n: u8) -> u8 {
LUT64[n as usize & 0x3f]
}
fn delut(c: u8) -> Option<u8> {
match c {
43 => Some(62),
47 => Some(63),
_ if 65 <= c && c <= 90 => Some(c - 65),
_ if 97 <= c && c <= 122 => Some(c - 71),
_ if 48 <= c && c <= 57 => Some(c + 4),
_ => None,
}
}
fn quad(n: &[u8]) -> [u8; 4] {
assert_eq!(n.len(), 3);
let (b, c) = (n[1] >> 4 | n[0] << 4, n[2] >> 6 | n[1] << 2);
[lut(n[0] >> 2), lut(b), lut(c), lut(n[2])]
}
fn triplet(n: &[u8]) -> Option<[u8; 3]> {
assert_eq!(n.len(), 4);
let a = maybe!(delut(n[0]));
let b = maybe!(delut(n[1]));
let c = maybe!(delut(n[2]));
let d = maybe!(delut(n[3]));
Some([a << 2 | b >> 4, b << 4 | c >> 2, c << 6 | d])
}
fn base64_no_pad(bytes: &[u8]) -> Vec<u8> {
let mut rv = vec![];
let mut pos = 0;
while pos + 3 <= bytes.len() {
rv.extend_from_slice(&quad(&bytes[pos..pos + 3]));
pos += 3;
}
if bytes.len() - pos == 1 {
rv.push(lut(bytes[pos] >> 2));
rv.push(lut((bytes[pos] & 0x03) << 4));
} else if bytes.len() - pos == 2 {
rv.extend_from_slice(&quad(&[bytes[pos], bytes[pos + 1], 0]));
rv.pop();
}
rv
}
fn debase64_no_pad(bytes: &[u8]) -> Option<Vec<u8>> {
if bytes.len() % 4 != 1 && bytes.len() > 0 {
let mut rv = vec![];
let mut pos = 0;
while pos + 4 <= bytes.len() {
let s = maybe!(triplet(&bytes[pos..pos + 4]));
rv.extend_from_slice(&s);
pos += 4;
}
if bytes.len() - pos == 2 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
rv.push(a << 2 | b >> 4);
} else if bytes.len() - pos == 3 {
let a = maybe!(delut(bytes[pos]));
let b = maybe!(delut(bytes[pos + 1]));
let c = maybe!(delut(bytes[pos + 2]));
rv.push(a << 2 | b >> 4);
rv.push(b << 4 | c >> 2);
}
Some(rv)
} else {
None
}
}
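// Illustrative sketch (not in the original source): `debase64_no_pad` is the inverse
// of `base64_no_pad` for well-formed input, e.g.
//
//     let encoded = base64_no_pad(b"any carnal pleasure");
//     assert_eq!(debase64_no_pad(&encoded), Some(b"any carnal pleasure".to_vec()));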
struct Parser<'a> {
enc: &'a [u8],
pos: usize,
}
impl<'a> fmt::Debug for Parser<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[..self.pos]))?;
write!(f, "<-- {} -->", self.pos)?;
write!(f, "{:?}", String::from_utf8_lossy(&self.enc[self.pos..]))?;
Ok(())
}
}
type Parsed<T> = Result<T, usize>;
impl<'a> Parser<'a> {
fn expect(&mut self, exp: &[u8]) -> Parsed<()> {
assert!(self.pos < self.enc.len());
if self.enc.len() - self.pos < exp.len() || &self.enc[self.pos..self.pos + exp.len()] != exp
{
self.err()
} else {
self.pos += exp.len();
Ok(())
}
}
fn read_until(&mut self, stopchar: u8) -> &'a [u8] {
let start = self.pos;
let stop = |c: &u8| *c == stopchar;
self.pos = match self.enc[self.pos..].iter().position(stop) {
None => self.enc.len() - 1,
Some(end) => self.pos + end,
};
&self.enc[start..self.pos]
}
fn read_u32(&mut self) -> Parsed<u32> {
let is_digit = |c: u8| 48 <= c && c <= 57;
let mut end = self.pos;
while end < self.enc.len() && is_digit(self.enc[end]) {
end += 1;
}
match str::from_utf8(&self.enc[self.pos..end]) {
Err(_) => self.err(),
Ok(s) => match s.parse() {
Err(_) => self.err(),
Ok(n) => {
self.pos = end;
Ok(n)
}
},
}
}
fn read_version(&mut self) -> Parsed<Version> {
self.read_u32().and_then(|vers| match vers {
0x10 => Ok(Version::_0x10),
0x13 => Ok(Version::_0x13),
_ => self.err(),
})
}
fn decode64_till_one_of(&mut self, char_set: &[u8]) -> Parsed<Vec<u8>> {
let end = self.enc[self.pos..]
.iter()
.position(|c| char_set.contains(c))
.map(|sub_pos| self.pos + sub_pos)
.unwrap_or_else(|| self.enc.len());
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn decode64_till(&mut self, stopchar: Option<u8>) -> Parsed<Vec<u8>> {
let end = match stopchar {
None => self.enc.len(),
Some(c) => {
self.enc[self.pos..]
.iter()
.take_while(|k| **k != c)
.fold(0, |c, _| c + 1)
+ self.pos
}
};
match debase64_no_pad(&self.enc[self.pos..end]) {
None => self.err(),
Some(rv) => {
self.pos = end;
Ok(rv)
}
}
}
fn err<T>(&self) -> Parsed<T> {
Err(self.pos)
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum DecodeError {
/// Byte position of first parse error
ParseError(usize),
/// Invalid Argon2 parameters given in encoding
InvalidParams(ParamErr),
}
impl fmt::Display for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DecodeError::*;
match *self {
ParseError(pos) => write!(f, "Parse error at position {}", pos),
InvalidParams(ref perr) => {
write!(f, "Invalid hash parameters given by encoded: {}", perr)
}
}
}
}
impl Error for DecodeError {
fn description(&self) -> &str {
match *self {
DecodeError::ParseError(_) => "Hash string parse error.",
DecodeError::InvalidParams(ref perr) => perr.description(),
}
}
}
/// Represents a single Argon2 hashing session. A hash session comprises the
/// hash algorithm parameters, salt, key, and data used to hash a given input.
#[derive(Debug, Eq, PartialEq)]
pub struct Encoded {
params: Argon2,
hash: Vec<u8>,
salt: Vec<u8>,
key: Vec<u8>,
data: Vec<u8>,
}
type Packed = (
Variant,
Version,
u32,
u32,
u32,
Vec<u8>,
Vec<u8>,
Vec<u8>,
Vec<u8>,
);
impl Encoded {
fn parse(encoded: &[u8]) -> Result<Packed, usize> {
let mut p = Parser {
enc: encoded,
pos: 0,
};
p.expect(b"$argon2")?;
let variant = match p.read_until('$' as u8) {
b"d" => Variant::Argon2d,
b"i" => Variant::Argon2i,
b"id" => Variant::Argon2id,
x => return Err(p.pos - x.len()),
};
p.expect(b"$")?;
let vers = match p.expect(b"v=") {
// Match the c reference impl's behavior, which defaults to a v0x10
// hash encoding since the `v=` field was only introduced with
// v0x13.
Err(_) => Version::_0x10,
Ok(()) => {
let vers = p.read_version()?;
p.expect(b",")?;
vers
}
};
p.expect(b"m=")?;
let kib = p.read_u32()?;
p.expect(b",t=")?;
let passes = p.read_u32()?;
p.expect(b",p=")?;
let lanes = p.read_u32()?;
let key = match p.expect(b",keyid=") {
Err(_) => vec![],
Ok(()) => p.decode64_till_one_of(b",$")?,
};
let data = match p.expect(b",data=") {
Ok(()) => p.decode64_till(Some(b'$'))?,
Err(_) => vec![],
};
p.expect(b"$")?;
let salt = p.decode64_till(Some(b'$'))?;
p.expect(b"$")?;
let hash = p.decode64_till(None)?;
Ok((variant, vers, kib, passes, lanes, key, data, salt, hash))
}
/// Reconstruct a previous hash session from serialized bytes.
pub fn from_u8(encoded: &[u8]) -> Result<Self, DecodeError> {
match Self::parse(encoded) {
Err(pos) => Err(DecodeError::ParseError(pos)),
Ok((v, vers, kib, passes, lanes, key, data, salt, hash)) => {
match Argon2::with_version(passes, lanes, kib, v, vers) {
Err(e) => Err(DecodeError::InvalidParams(e)),
Ok(a2) => Ok(Encoded {
params: a2,
hash: hash,
salt: salt,
key: key,
data: data,
}),
}
}
}
}
/// Serialize this hashing session into raw bytes that can later be
/// recovered by `Encoded::from_u8`.
pub fn | (&self) -> Vec<u8> {
let vcode = |v| match v {
Variant::Argon2i => "i",
Variant::Argon2d => "d",
Variant::Argon2id => "id",
};
let b64 = |x| String::from_utf8(base64_no_pad(x)).unwrap();
let k_ = match &b64(&self.key[..]) {
bytes if bytes.len() > 0 => format!(",keyid={}", bytes),
_ => String::new(),
};
let x_ = match &b64(&self.data[..]) {
bytes if bytes.len() > 0 => format!(",data={}", bytes),
_ => String::new(),
};
let (var, m, t, p, vers) = self.params();
format!(
"$argon2{}$v={},m={},t={},p={}{}{}${}${}",
vcode(var),
vers as usize,
m,
t,
p,
k_,
x_,
b64(&self.salt[..]),
b64(&self.hash)
)
.into_bytes()
}
/// Generates a new hashing session from password, salt, and other byte
/// input. Parameters are:
///
/// `argon`: An `Argon2` struct representative of the desired hash algorithm
/// parameters.
///
/// `p`: Password input.
///
/// `s`: Salt.
///
/// `k`: An optional secret value.
///
/// `x`: Optional, miscellaneous associated data.
///
/// Note that `p, s, k, x` must conform to the same length constraints
/// dictated by `Argon2::hash`.
pub fn new(argon: Argon2, p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
let mut out = vec![0 as u8; defaults::LENGTH];
argon.hash(&mut out[..], p, s, k, x);
Encoded {
params: argon,
hash: out,
salt: s.iter().cloned().collect(),
key: k.iter().cloned().collect(),
data: x.iter().cloned().collect(),
}
}
/// Same as `Encoded::new`, but with the default Argon2i hash algorithm
/// parameters.
pub fn default2i(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2i), p, s, k, x)
}
/// Same as `Encoded::new`, but with the default _Argon2d_ hash algorithm
/// parameters.
pub fn default2d(p: &[u8], s: &[u8], k: &[u8], x: &[u8]) -> Self {
Self::new(Argon2::default(Variant::Argon2d), p, s, k, x)
}
/// Verifies password input against the hash that was previously created in
/// this hashing session.
pub fn verify(&self, p: &[u8]) -> bool {
let mut out = [0 as u8; defaults::LENGTH];
let s = &self.salt[..];
self.params
.hash(&mut out, p, s, &self.key[..], &self.data[..]);
constant_eq(&out, &self.hash)
}
/// Provides read-only access to the Argon2 parameters of this hash.
pub fn params(&self) -> (Variant, u32, u32, u32, Version) {
self.params.params()
}
}
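// Illustrative sketch (not part of the original crate): `default2d` mirrors `default2i`
// with the Argon2d variant, e.g.
//
//     let session = Encoded::default2d(b"password", b"some example salt", b"", &[]);
//     assert!(session.verify(b"password"));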
/// Compares two byte slices for equality without short-circuiting on the first
/// differing byte. Returns `false` if the lengths differ.
#[inline(never)]
pub fn constant_eq(xs: &[u8], ys: &[u8]) -> bool {
if xs.len() != ys.len() {
false
} else {
let rv = xs.iter().zip(ys.iter()).fold(0, |rv, (x, y)| rv | (x ^ y));
// this kills the optimizer.
(1 & (rv as u32).wrapping_sub(1) >> 8).wrapping_sub(1) == 0
}
}
#[cfg(test)]
mod test {
use super::{base64_no_pad, debase64_no_pad, Encoded};
const BASE64_CASES: [(&'static [u8], &'static [u8]); 5] = [
(b"any carnal pleasure.", b"YW55IGNhcm5hbCBwbGVhc3VyZS4"),
(b"any carnal pleasure", b"YW55IGNhcm5hbCBwbGVhc3VyZQ"),
(b"any carnal pleasur", b"YW55IGNhcm5hbCBwbGVhc3Vy"),
(b"any carnal pleasu", b"YW55IGNhcm5hbCBwbGVhc3U"),
(b"any carnal pleas", b"YW55IGNhcm5hbCBwbGVhcw"),
];
const ENCODED: &'static [&'static [u8]] = &[
b"$argon2i$m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
// ^ ensures that default version is 0x10.
b"$argon2i$v=16,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$Eh1lW3mjkhlMLRQdE7vXZnvwDXSGLBfXa6BGK4a1J3s",
b"$argon2i$v=19,m=4096,t=3,p=1$dG9kbzogZnV6eiB0ZXN0cw\
$AvsXI+N78kGHzeGwzz0VTjfBdl7MmgvBGfJ/XXyqLbA",
];
#[test]
fn test_base64_no_pad() {
for &(s, exp) in BASE64_CASES.iter() {
assert_eq!(&base64_no_pad(s)[..], exp);
}
}
#[test]
fn test_debase64_no_pad() {
for &(exp, s) in BASE64_CASES.iter() {
assert_eq!(debase64_no_pad(s).unwrap(), exp);
}
}
#[test]
fn test_verify() {
for &hash_string in ENCODED {
let v = Encoded::from_u8(hash_string).unwrap();
assert_eq!(v.verify(b"argon2i!"), true);
assert_eq!(v.verify(b"nope"), false);
}
}
#[test]
fn encode_decode() {
for &(s, _) in BASE64_CASES.iter() {
let salt = b"Yum! Extra salty";
let key = b"ff5dfa4d7a048f9db4ad0caad82e75c";
let enc = Encoded::default2i(s, salt, key, &[]);
assert_eq!(Encoded::from_u8(&enc.to_u8()), Ok(enc));
}
}
#[test]
fn bad_encoded() {
use super::DecodeError::*;
use argon2::ParamErr::*;
let cases: &[(&'static [u8], super::DecodeError)] = &[
(b"$argon2y$v=19,m=4096", ParseError(7)),
(
b"$argon2i$v=19,m=-2,t=-4,p=-4$aaaaaaaa$ffffff",
ParseError(16),
),
// ^ negative m is invalid.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff*",
ParseError(35),
),
// ^ asterisk is invalid base64 char.
(
b"$argon2i$v=19,m=0,t=0,p=0$aaaaaaaa$ffffff",
InvalidParams(TooFewPasses),
),
// ^ p = 0 is invalid.
(b"$argon2i$m", ParseError(9)),
];
// ^ intentionally fail Encoded::expect with undersized input
for &(case, err) in cases.iter() {
let v = Encoded::from_u8(case);
assert!(v.is_err());
assert_eq!(v.err().unwrap(), err);
}
}
}
| to_u8 | identifier_name |
disk.rs | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! VM disk image file format I/O.
use std::cmp::min;
use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use base::get_filesystem_type;
use base::info;
use base::AsRawDescriptors;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::FileSync;
use base::PunchHole;
use base::WriteZeroesAt;
use cros_async::AllocateMode;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSourceExt;
use thiserror::Error as ThisError;
mod asynchronous;
#[allow(unused)]
pub(crate) use asynchronous::AsyncDiskFileWrapper;
#[cfg(feature = "qcow")]
mod qcow;
#[cfg(feature = "qcow")]
pub use qcow::QcowFile;
#[cfg(feature = "qcow")]
pub use qcow::QCOW_MAGIC;
mod sys;
#[cfg(feature = "composite-disk")]
mod composite;
#[cfg(feature = "composite-disk")]
use composite::CompositeDiskFile;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC_LEN;
#[cfg(feature = "composite-disk")]
mod gpt;
#[cfg(feature = "composite-disk")]
pub use composite::create_composite_disk;
#[cfg(feature = "composite-disk")]
pub use composite::create_zero_filler;
#[cfg(feature = "composite-disk")]
pub use composite::Error as CompositeError;
#[cfg(feature = "composite-disk")]
pub use composite::ImagePartitionType;
#[cfg(feature = "composite-disk")]
pub use composite::PartitionInfo;
#[cfg(feature = "composite-disk")]
pub use gpt::Error as GptError;
#[cfg(feature = "android-sparse")]
mod android_sparse;
#[cfg(feature = "android-sparse")]
use android_sparse::AndroidSparse;
#[cfg(feature = "android-sparse")]
use android_sparse::SPARSE_HEADER_MAGIC;
/// Nesting depth limit for disk formats that can open other disk files.
pub const MAX_NESTING_DEPTH: u32 = 10;
#[derive(ThisError, Debug)]
pub enum Error {
#[error("failed to create block device: {0}")]
BlockDeviceNew(base::Error),
#[error("requested file conversion not supported")]
ConversionNotSupported,
#[cfg(feature = "android-sparse")]
#[error("failure in android sparse disk: {0}")]
CreateAndroidSparseDisk(android_sparse::Error),
#[cfg(feature = "composite-disk")]
#[error("failure in composite disk: {0}")]
CreateCompositeDisk(composite::Error),
#[error("failure creating single file disk: {0}")]
CreateSingleFileDisk(cros_async::AsyncError),
#[error("failure with fallocate: {0}")]
Fallocate(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
Fsync(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
IoFsync(io::Error),
#[error("checking host fs type: {0}")]
HostFsType(base::Error),
#[error("maximum disk nesting depth exceeded")]
MaxNestingDepthExceeded,
#[error("failure to punch hole: {0}")]
PunchHole(io::Error),
#[cfg(feature = "qcow")]
#[error("failure in qcow: {0}")]
QcowError(qcow::Error),
#[error("failed to read data: {0}")]
ReadingData(io::Error),
#[error("failed to read header: {0}")]
ReadingHeader(io::Error),
#[error("failed to read to memory: {0}")]
ReadToMem(cros_async::AsyncError),
#[error("failed to seek file: {0}")]
SeekingFile(io::Error),
#[error("failed to set file size: {0}")]
SettingFileSize(io::Error),
#[error("unknown disk type")]
UnknownType,
#[error("failed to write from memory: {0}")]
WriteFromMem(cros_async::AsyncError),
#[error("failed to write from vec: {0}")]
WriteFromVec(cros_async::AsyncError),
#[error("failed to write zeroes: {0}")]
WriteZeroes(io::Error),
#[error("failed to write data: {0}")]
WritingData(io::Error),
#[cfg(windows)]
#[error("failed to set disk file sparse: {0}")]
SetSparseFailure(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
/// A trait for getting the length of a disk image or raw block device.
pub trait DiskGetLen {
/// Get the current length of the disk in bytes.
fn get_len(&self) -> io::Result<u64>;
}
impl DiskGetLen for File {
fn get_len(&self) -> io::Result<u64> {
let mut s = self;
let orig_seek = s.seek(SeekFrom::Current(0))?;
let end = s.seek(SeekFrom::End(0))? as u64;
s.seek(SeekFrom::Start(orig_seek))?;
Ok(end)
}
}
/// The prerequisites necessary to support a block device.
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds.
pub trait DiskFile:
FileSetLen
+ DiskGetLen
+ FileSync
+ FileReadWriteAtVolatile
+ PunchHole
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug
{
}
impl<
D: FileSetLen
+ DiskGetLen
+ FileSync
+ PunchHole
+ FileReadWriteAtVolatile
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug,
> DiskFile for D
{
}
/// A `DiskFile` that can be converted for asynchronous access.
pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send {
/// Convert a boxed self into a box-wrapped implementation of AsyncDisk.
/// Used to convert a standard disk image to an async disk image. This conversion and the
/// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is
/// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned
/// to the main device thread if the block device is destroyed or reset.
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>;
}
impl ToAsyncDisk for File {
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> {
Ok(Box::new(SingleFileDisk::new(*self, ex)?))
}
}
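// Illustrative sketch (not part of the original file): converting a plain `File` into
// an async disk; the image path and the `Executor::new()` construction are assumptions.
//
//     let ex = Executor::new().expect("failed to create executor");
//     let file = File::open("disk.img").expect("failed to open image");
//     let async_disk = Box::new(file).to_async_disk(&ex).expect("conversion failed");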
/// The variants of image files on the host that can be used as virtual disks.
#[derive(Debug, PartialEq, Eq)]
pub enum ImageType {
Raw,
Qcow2,
CompositeDisk,
AndroidSparse,
}
fn log_host_fs_type(file: &File) -> Result<()> {
let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?;
info!("Disk image file is hosted on file system type {:x}", fstype);
Ok(())
}
/// Detect the type of an image file by checking for a valid header of the supported formats.
pub fn detect_image_type(file: &File) -> Result<ImageType> {
let mut f = file;
let disk_size = f.get_len().map_err(Error::SeekingFile)?;
let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?;
f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
info!("disk size {}, ", disk_size);
log_host_fs_type(f)?;
// Try to read the disk in a nicely-aligned block size unless the whole file is smaller.
const MAGIC_BLOCK_SIZE: usize = 4096;
#[repr(align(4096))]
struct BlockAlignedBuffer {
data: [u8; MAGIC_BLOCK_SIZE],
}
let mut magic = BlockAlignedBuffer {
data: [0u8; MAGIC_BLOCK_SIZE],
};
let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 {
MAGIC_BLOCK_SIZE
} else {
// This cast is safe since we know disk_size is at most MAGIC_BLOCK_SIZE (4096) and
// therefore is representable in usize.
disk_size as usize
};
f.read_exact(&mut magic.data[0..magic_read_len])
.map_err(Error::ReadingHeader)?;
f.seek(SeekFrom::Start(orig_seek))
.map_err(Error::SeekingFile)?;
#[cfg(feature = "composite-disk")]
if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) {
if cdisk_magic == CDISK_MAGIC.as_bytes() {
return Ok(ImageType::CompositeDisk);
}
}
#[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features.
if let Some(magic4) = magic.data.get(0..4) {
#[cfg(feature = "qcow")]
if magic4 == QCOW_MAGIC.to_be_bytes() {
return Ok(ImageType::Qcow2);
}
#[cfg(feature = "android-sparse")]
if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() {
return Ok(ImageType::AndroidSparse);
}
}
Ok(ImageType::Raw)
}
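// Illustrative sketch (not part of the original file): probing an image before
// deciding how to open it; the path is an assumption.
//
//     let file = File::open("disk.img").expect("failed to open image");
//     match detect_image_type(&file) {
//         Ok(ImageType::Raw) => { /* open as a raw disk */ }
//         Ok(other) => info!("detected image type {:?}", other),
//         Err(e) => panic!("failed to detect image type: {}", e),
//     }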
/// Inspect the image file type and create an appropriate disk file to match it.
pub fn create_disk_file(
raw_image: File,
is_sparse_file: bool,
// max_nesting_depth is only used if the composite-disk or qcow features are enabled.
#[allow(unused_variables)] mut max_nesting_depth: u32,
// image_path is only used if the composite-disk feature is enabled.
#[allow(unused_variables)] image_path: &Path,
) -> Result<Box<dyn DiskFile>> {
if max_nesting_depth == 0 {
return Err(Error::MaxNestingDepthExceeded);
}
#[allow(unused_assignments)]
{
max_nesting_depth -= 1;
}
let image_type = detect_image_type(&raw_image)?;
Ok(match image_type {
ImageType::Raw => {
sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?;
Box::new(raw_image) as Box<dyn DiskFile>
}
#[cfg(feature = "qcow")]
ImageType::Qcow2 => {
Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?)
as Box<dyn DiskFile>
}
#[cfg(feature = "composite-disk")]
ImageType::CompositeDisk => {
// Valid composite disk header present
Box::new(
CompositeDiskFile::from_file(
raw_image,
is_sparse_file,
max_nesting_depth,
image_path,
)
.map_err(Error::CreateCompositeDisk)?,
) as Box<dyn DiskFile>
}
#[cfg(feature = "android-sparse")]
ImageType::AndroidSparse => {
Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?)
as Box<dyn DiskFile>
}
#[allow(unreachable_patterns)]
_ => return Err(Error::UnknownType),
})
}
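// Illustrative sketch (not part of the original file): opening an image with the
// default nesting limit; the path and sparse flag are assumptions.
//
//     let raw = File::open("/path/to/disk.img").expect("failed to open image");
//     let disk = create_disk_file(raw, true, MAX_NESTING_DEPTH, Path::new("/path/to/disk.img"))
//         .expect("failed to create disk file");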
/// An asynchronously accessible disk.
#[async_trait(?Send)]
pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate {
/// Returns the inner file consuming self.
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile>;
/// Asynchronously fsyncs any completed operations to the disk.
async fn fsync(&self) -> Result<()>;
/// Reads from the file at 'file_offset' into memory `mem` at `mem_offsets`.
/// `mem_offsets` is similar to an iovec except relative to the start of `mem`.
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`.
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Replaces a range of bytes with a hole.
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>;
/// Writes `length` bytes of zeroes to the file starting at `file_offset`.
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>;
}
/// A disk backed by a single file that implements `AsyncDisk` for access.
pub struct SingleFileDisk {
inner: Box<dyn IoSourceExt<File>>,
}
impl SingleFileDisk {
pub fn new(disk: File, ex: &Executor) -> Result<Self> {
ex.async_from(disk)
.map_err(Error::CreateSingleFileDisk)
.map(|inner| SingleFileDisk { inner })
}
}
impl DiskGetLen for SingleFileDisk {
fn get_len(&self) -> io::Result<u64> {
self.inner.as_source().get_len()
}
}
impl FileSetLen for SingleFileDisk {
fn set_len(&self, len: u64) -> io::Result<()> {
self.inner.as_source().set_len(len)
}
}
impl FileAllocate for SingleFileDisk {
fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> {
self.inner.as_source_mut().allocate(offset, len)
}
}
#[async_trait(?Send)]
impl AsyncDisk for SingleFileDisk {
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
Box::new(self.inner.into_source())
}
async fn fsync(&self) -> Result<()> {
self.inner.fsync().await.map_err(Error::Fsync)
}
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.read_to_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::ReadToMem)
}
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.write_from_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::WriteFromMem)
}
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> {
self.inner
.fallocate(file_offset, length, AllocateMode::PunchHole)
.await
.map_err(Error::Fallocate)
}
async fn | (&self, file_offset: u64, length: u64) -> Result<()> {
if self
.inner
.fallocate(file_offset, length, AllocateMode::ZeroRange)
.await
.is_ok()
{
return Ok(());
}
// Fall back to writing zeros if fallocate doesn't work.
let buf_size = min(length, 0x10000);
let mut nwritten = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size) as usize;
let buf = vec![0u8; write_size];
nwritten += self
.inner
.write_from_vec(Some(file_offset + nwritten as u64), buf)
.await
.map(|(n, _)| n as u64)
.map_err(Error::WriteFromVec)?;
}
Ok(())
}
}
| write_zeroes_at | identifier_name |
disk.rs | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! VM disk image file format I/O.
use std::cmp::min;
use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use base::get_filesystem_type;
use base::info;
use base::AsRawDescriptors;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::FileSync;
use base::PunchHole;
use base::WriteZeroesAt;
use cros_async::AllocateMode;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSourceExt;
use thiserror::Error as ThisError;
mod asynchronous;
#[allow(unused)]
pub(crate) use asynchronous::AsyncDiskFileWrapper;
#[cfg(feature = "qcow")]
mod qcow;
#[cfg(feature = "qcow")]
pub use qcow::QcowFile;
#[cfg(feature = "qcow")]
pub use qcow::QCOW_MAGIC;
mod sys;
#[cfg(feature = "composite-disk")]
mod composite;
#[cfg(feature = "composite-disk")]
use composite::CompositeDiskFile;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC_LEN;
#[cfg(feature = "composite-disk")]
mod gpt;
#[cfg(feature = "composite-disk")]
pub use composite::create_composite_disk;
#[cfg(feature = "composite-disk")]
pub use composite::create_zero_filler;
#[cfg(feature = "composite-disk")]
pub use composite::Error as CompositeError;
#[cfg(feature = "composite-disk")]
pub use composite::ImagePartitionType;
#[cfg(feature = "composite-disk")]
pub use composite::PartitionInfo;
#[cfg(feature = "composite-disk")]
pub use gpt::Error as GptError;
#[cfg(feature = "android-sparse")]
mod android_sparse;
#[cfg(feature = "android-sparse")]
use android_sparse::AndroidSparse;
#[cfg(feature = "android-sparse")]
use android_sparse::SPARSE_HEADER_MAGIC;
/// Nesting depth limit for disk formats that can open other disk files.
pub const MAX_NESTING_DEPTH: u32 = 10;
#[derive(ThisError, Debug)]
pub enum Error {
#[error("failed to create block device: {0}")]
BlockDeviceNew(base::Error),
#[error("requested file conversion not supported")]
ConversionNotSupported,
#[cfg(feature = "android-sparse")]
#[error("failure in android sparse disk: {0}")]
CreateAndroidSparseDisk(android_sparse::Error),
#[cfg(feature = "composite-disk")]
#[error("failure in composite disk: {0}")]
CreateCompositeDisk(composite::Error),
#[error("failure creating single file disk: {0}")]
CreateSingleFileDisk(cros_async::AsyncError),
#[error("failure with fallocate: {0}")]
Fallocate(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
Fsync(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
IoFsync(io::Error),
#[error("checking host fs type: {0}")]
HostFsType(base::Error),
#[error("maximum disk nesting depth exceeded")]
MaxNestingDepthExceeded,
#[error("failure to punch hole: {0}")]
PunchHole(io::Error),
#[cfg(feature = "qcow")]
#[error("failure in qcow: {0}")]
QcowError(qcow::Error),
#[error("failed to read data: {0}")]
ReadingData(io::Error),
#[error("failed to read header: {0}")]
ReadingHeader(io::Error),
#[error("failed to read to memory: {0}")]
ReadToMem(cros_async::AsyncError),
#[error("failed to seek file: {0}")]
SeekingFile(io::Error),
#[error("failed to set file size: {0}")]
SettingFileSize(io::Error),
#[error("unknown disk type")]
UnknownType,
#[error("failed to write from memory: {0}")]
WriteFromMem(cros_async::AsyncError),
#[error("failed to write from vec: {0}")]
WriteFromVec(cros_async::AsyncError),
#[error("failed to write zeroes: {0}")]
WriteZeroes(io::Error),
#[error("failed to write data: {0}")]
WritingData(io::Error),
#[cfg(windows)]
#[error("failed to set disk file sparse: {0}")]
SetSparseFailure(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
/// A trait for getting the length of a disk image or raw block device.
pub trait DiskGetLen {
/// Get the current length of the disk in bytes.
fn get_len(&self) -> io::Result<u64>;
}
impl DiskGetLen for File {
fn get_len(&self) -> io::Result<u64> {
let mut s = self;
let orig_seek = s.seek(SeekFrom::Current(0))?;
let end = s.seek(SeekFrom::End(0))? as u64;
s.seek(SeekFrom::Start(orig_seek))?;
Ok(end)
}
}
/// The prerequisites necessary to support a block device.
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds.
pub trait DiskFile:
FileSetLen
+ DiskGetLen
+ FileSync
+ FileReadWriteAtVolatile
+ PunchHole
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug
{
}
impl<
D: FileSetLen
+ DiskGetLen
+ FileSync
+ PunchHole
+ FileReadWriteAtVolatile
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug,
> DiskFile for D
{
}
/// A `DiskFile` that can be converted for asynchronous access.
pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send {
/// Convert a boxed self into a box-wrapped implementation of AsyncDisk.
/// Used to convert a standard disk image to an async disk image. This conversion and the
/// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is
/// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned
/// to the main device thread if the block device is destroyed or reset.
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>;
}
impl ToAsyncDisk for File {
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> {
Ok(Box::new(SingleFileDisk::new(*self, ex)?))
}
}
/// The variants of image files on the host that can be used as virtual disks.
#[derive(Debug, PartialEq, Eq)]
pub enum ImageType {
Raw,
Qcow2,
CompositeDisk,
AndroidSparse,
}
fn log_host_fs_type(file: &File) -> Result<()> {
let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?;
info!("Disk image file is hosted on file system type {:x}", fstype);
Ok(())
}
/// Detect the type of an image file by checking for a valid header of the supported formats.
pub fn detect_image_type(file: &File) -> Result<ImageType> {
let mut f = file;
let disk_size = f.get_len().map_err(Error::SeekingFile)?;
let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?;
f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
info!("disk size {}, ", disk_size);
log_host_fs_type(f)?;
// Try to read the disk in a nicely-aligned block size unless the whole file is smaller.
const MAGIC_BLOCK_SIZE: usize = 4096;
#[repr(align(4096))]
struct BlockAlignedBuffer {
data: [u8; MAGIC_BLOCK_SIZE],
}
let mut magic = BlockAlignedBuffer {
data: [0u8; MAGIC_BLOCK_SIZE],
};
let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 {
MAGIC_BLOCK_SIZE
} else {
// This cast is safe since we know disk_size is at most MAGIC_BLOCK_SIZE (4096) and
// therefore is representable in usize.
disk_size as usize
};
f.read_exact(&mut magic.data[0..magic_read_len])
.map_err(Error::ReadingHeader)?;
f.seek(SeekFrom::Start(orig_seek))
.map_err(Error::SeekingFile)?;
#[cfg(feature = "composite-disk")]
if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) {
if cdisk_magic == CDISK_MAGIC.as_bytes() {
return Ok(ImageType::CompositeDisk);
}
}
#[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features.
if let Some(magic4) = magic.data.get(0..4) {
#[cfg(feature = "qcow")]
if magic4 == QCOW_MAGIC.to_be_bytes() {
return Ok(ImageType::Qcow2);
}
#[cfg(feature = "android-sparse")]
if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() {
return Ok(ImageType::AndroidSparse);
}
}
Ok(ImageType::Raw)
}
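// Illustrative sketch (not part of the original file): a caller can probe the format
// first and branch on the result; the image path is an assumption.
//
//     let file = File::open("guest.img").expect("failed to open image");
//     if matches!(detect_image_type(&file), Ok(ImageType::Qcow2)) {
//         info!("image is qcow2");
//     }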
| // max_nesting_depth is only used if the composite-disk or qcow features are enabled.
#[allow(unused_variables)] mut max_nesting_depth: u32,
// image_path is only used if the composite-disk feature is enabled.
#[allow(unused_variables)] image_path: &Path,
) -> Result<Box<dyn DiskFile>> {
if max_nesting_depth == 0 {
return Err(Error::MaxNestingDepthExceeded);
}
#[allow(unused_assignments)]
{
max_nesting_depth -= 1;
}
let image_type = detect_image_type(&raw_image)?;
Ok(match image_type {
ImageType::Raw => {
sys::apply_raw_disk_file_options(&raw_image, is_sparse_file)?;
Box::new(raw_image) as Box<dyn DiskFile>
}
#[cfg(feature = "qcow")]
ImageType::Qcow2 => {
Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?)
as Box<dyn DiskFile>
}
#[cfg(feature = "composite-disk")]
ImageType::CompositeDisk => {
// Valid composite disk header present
Box::new(
CompositeDiskFile::from_file(
raw_image,
is_sparse_file,
max_nesting_depth,
image_path,
)
.map_err(Error::CreateCompositeDisk)?,
) as Box<dyn DiskFile>
}
#[cfg(feature = "android-sparse")]
ImageType::AndroidSparse => {
Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?)
as Box<dyn DiskFile>
}
#[allow(unreachable_patterns)]
_ => return Err(Error::UnknownType),
})
}
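// Illustrative sketch (not part of the original file): `create_disk_file` consumes the
// opened `File` and returns a boxed `DiskFile`; the arguments shown are assumptions.
//
//     let raw = File::open("guest.img").expect("failed to open image");
//     let disk: Box<dyn DiskFile> =
//         create_disk_file(raw, false, MAX_NESTING_DEPTH, Path::new("guest.img"))
//             .expect("failed to create disk file");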
/// An asynchronously accessible disk.
#[async_trait(?Send)]
pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate {
/// Returns the inner file consuming self.
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile>;
/// Asynchronously fsyncs any completed operations to the disk.
async fn fsync(&self) -> Result<()>;
/// Reads from the file at 'file_offset' into memory `mem` at `mem_offsets`.
/// `mem_offsets` is similar to an iovec except relative to the start of `mem`.
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`.
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Replaces a range of bytes with a hole.
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>;
/// Writes `length` bytes of zeroes to the file starting at `file_offset`.
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>;
}
/// A disk backed by a single file that implements `AsyncDisk` for access.
pub struct SingleFileDisk {
inner: Box<dyn IoSourceExt<File>>,
}
impl SingleFileDisk {
pub fn new(disk: File, ex: &Executor) -> Result<Self> {
ex.async_from(disk)
.map_err(Error::CreateSingleFileDisk)
.map(|inner| SingleFileDisk { inner })
}
}
impl DiskGetLen for SingleFileDisk {
fn get_len(&self) -> io::Result<u64> {
self.inner.as_source().get_len()
}
}
impl FileSetLen for SingleFileDisk {
fn set_len(&self, len: u64) -> io::Result<()> {
self.inner.as_source().set_len(len)
}
}
impl FileAllocate for SingleFileDisk {
fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> {
self.inner.as_source_mut().allocate(offset, len)
}
}
#[async_trait(?Send)]
impl AsyncDisk for SingleFileDisk {
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
Box::new(self.inner.into_source())
}
async fn fsync(&self) -> Result<()> {
self.inner.fsync().await.map_err(Error::Fsync)
}
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.read_to_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::ReadToMem)
}
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.write_from_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::WriteFromMem)
}
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> {
self.inner
.fallocate(file_offset, length, AllocateMode::PunchHole)
.await
.map_err(Error::Fallocate)
}
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()> {
if self
.inner
.fallocate(file_offset, length, AllocateMode::ZeroRange)
.await
.is_ok()
{
return Ok(());
}
// Fall back to writing zeros if fallocate doesn't work.
let buf_size = min(length, 0x10000);
let mut nwritten = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size) as usize;
let buf = vec![0u8; write_size];
nwritten += self
.inner
.write_from_vec(Some(file_offset + nwritten as u64), buf)
.await
.map(|(n, _)| n as u64)
.map_err(Error::WriteFromVec)?;
}
Ok(())
}
} | /// Inspect the image file type and create an appropriate disk file to match it.
pub fn create_disk_file(
raw_image: File,
is_sparse_file: bool, | random_line_split |
disk.rs | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! VM disk image file format I/O.
use std::cmp::min;
use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::path::Path;
use std::sync::Arc;
use async_trait::async_trait;
use base::get_filesystem_type;
use base::info;
use base::AsRawDescriptors;
use base::FileAllocate;
use base::FileReadWriteAtVolatile;
use base::FileSetLen;
use base::FileSync;
use base::PunchHole;
use base::WriteZeroesAt;
use cros_async::AllocateMode;
use cros_async::BackingMemory;
use cros_async::Executor;
use cros_async::IoSourceExt;
use thiserror::Error as ThisError;
mod asynchronous;
#[allow(unused)]
pub(crate) use asynchronous::AsyncDiskFileWrapper;
#[cfg(feature = "qcow")]
mod qcow;
#[cfg(feature = "qcow")]
pub use qcow::QcowFile;
#[cfg(feature = "qcow")]
pub use qcow::QCOW_MAGIC;
mod sys;
#[cfg(feature = "composite-disk")]
mod composite;
#[cfg(feature = "composite-disk")]
use composite::CompositeDiskFile;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC;
#[cfg(feature = "composite-disk")]
use composite::CDISK_MAGIC_LEN;
#[cfg(feature = "composite-disk")]
mod gpt;
#[cfg(feature = "composite-disk")]
pub use composite::create_composite_disk;
#[cfg(feature = "composite-disk")]
pub use composite::create_zero_filler;
#[cfg(feature = "composite-disk")]
pub use composite::Error as CompositeError;
#[cfg(feature = "composite-disk")]
pub use composite::ImagePartitionType;
#[cfg(feature = "composite-disk")]
pub use composite::PartitionInfo;
#[cfg(feature = "composite-disk")]
pub use gpt::Error as GptError;
#[cfg(feature = "android-sparse")]
mod android_sparse;
#[cfg(feature = "android-sparse")]
use android_sparse::AndroidSparse;
#[cfg(feature = "android-sparse")]
use android_sparse::SPARSE_HEADER_MAGIC;
/// Nesting depth limit for disk formats that can open other disk files.
pub const MAX_NESTING_DEPTH: u32 = 10;
#[derive(ThisError, Debug)]
pub enum Error {
#[error("failed to create block device: {0}")]
BlockDeviceNew(base::Error),
#[error("requested file conversion not supported")]
ConversionNotSupported,
#[cfg(feature = "android-sparse")]
#[error("failure in android sparse disk: {0}")]
CreateAndroidSparseDisk(android_sparse::Error),
#[cfg(feature = "composite-disk")]
#[error("failure in composite disk: {0}")]
CreateCompositeDisk(composite::Error),
#[error("failure creating single file disk: {0}")]
CreateSingleFileDisk(cros_async::AsyncError),
#[error("failure with fallocate: {0}")]
Fallocate(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
Fsync(cros_async::AsyncError),
#[error("failure with fsync: {0}")]
IoFsync(io::Error),
#[error("checking host fs type: {0}")]
HostFsType(base::Error),
#[error("maximum disk nesting depth exceeded")]
MaxNestingDepthExceeded,
#[error("failure to punch hole: {0}")]
PunchHole(io::Error),
#[cfg(feature = "qcow")]
#[error("failure in qcow: {0}")]
QcowError(qcow::Error),
#[error("failed to read data: {0}")]
ReadingData(io::Error),
#[error("failed to read header: {0}")]
ReadingHeader(io::Error),
#[error("failed to read to memory: {0}")]
ReadToMem(cros_async::AsyncError),
#[error("failed to seek file: {0}")]
SeekingFile(io::Error),
#[error("failed to set file size: {0}")]
SettingFileSize(io::Error),
#[error("unknown disk type")]
UnknownType,
#[error("failed to write from memory: {0}")]
WriteFromMem(cros_async::AsyncError),
#[error("failed to write from vec: {0}")]
WriteFromVec(cros_async::AsyncError),
#[error("failed to write zeroes: {0}")]
WriteZeroes(io::Error),
#[error("failed to write data: {0}")]
WritingData(io::Error),
#[cfg(windows)]
#[error("failed to set disk file sparse: {0}")]
SetSparseFailure(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
/// A trait for getting the length of a disk image or raw block device.
pub trait DiskGetLen {
/// Get the current length of the disk in bytes.
fn get_len(&self) -> io::Result<u64>;
}
impl DiskGetLen for File {
fn get_len(&self) -> io::Result<u64> {
let mut s = self;
let orig_seek = s.seek(SeekFrom::Current(0))?;
let end = s.seek(SeekFrom::End(0))? as u64;
s.seek(SeekFrom::Start(orig_seek))?;
Ok(end)
}
}
/// The prerequisites necessary to support a block device.
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds.
pub trait DiskFile:
FileSetLen
+ DiskGetLen
+ FileSync
+ FileReadWriteAtVolatile
+ PunchHole
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug
{
}
impl<
D: FileSetLen
+ DiskGetLen
+ FileSync
+ PunchHole
+ FileReadWriteAtVolatile
+ WriteZeroesAt
+ FileAllocate
+ ToAsyncDisk
+ Send
+ AsRawDescriptors
+ Debug,
> DiskFile for D
{
}
/// A `DiskFile` that can be converted for asynchronous access.
pub trait ToAsyncDisk: AsRawDescriptors + DiskGetLen + Send {
    /// Convert a boxed self into a box-wrapped implementation of AsyncDisk.
/// Used to convert a standard disk image to an async disk image. This conversion and the
    /// inverse are needed so that the `Send` DiskFile can be given to the block thread where it is
/// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned
/// to the main device thread if the block device is destroyed or reset.
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>;
}
impl ToAsyncDisk for File {
fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> {
Ok(Box::new(SingleFileDisk::new(*self, ex)?))
}
}
/// The variants of image files on the host that can be used as virtual disks.
#[derive(Debug, PartialEq, Eq)]
pub enum ImageType {
Raw,
Qcow2,
CompositeDisk,
AndroidSparse,
}
fn log_host_fs_type(file: &File) -> Result<()> {
let fstype = get_filesystem_type(file).map_err(Error::HostFsType)?;
info!("Disk image file is hosted on file system type {:x}", fstype);
Ok(())
}
/// Detect the type of an image file by checking for a valid header of the supported formats.
pub fn detect_image_type(file: &File) -> Result<ImageType> {
let mut f = file;
let disk_size = f.get_len().map_err(Error::SeekingFile)?;
let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?;
f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?;
info!("disk size {}, ", disk_size);
log_host_fs_type(f)?;
// Try to read the disk in a nicely-aligned block size unless the whole file is smaller.
const MAGIC_BLOCK_SIZE: usize = 4096;
#[repr(align(4096))]
struct BlockAlignedBuffer {
data: [u8; MAGIC_BLOCK_SIZE],
}
let mut magic = BlockAlignedBuffer {
data: [0u8; MAGIC_BLOCK_SIZE],
};
let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 {
MAGIC_BLOCK_SIZE
} else {
        // This cast is safe since we know disk_size is at most MAGIC_BLOCK_SIZE (4096) and
        // therefore is representable in usize.
disk_size as usize
};
f.read_exact(&mut magic.data[0..magic_read_len])
.map_err(Error::ReadingHeader)?;
f.seek(SeekFrom::Start(orig_seek))
.map_err(Error::SeekingFile)?;
#[cfg(feature = "composite-disk")]
if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) {
if cdisk_magic == CDISK_MAGIC.as_bytes() {
return Ok(ImageType::CompositeDisk);
}
}
#[allow(unused_variables)] // magic4 is only used with the qcow or android-sparse features.
if let Some(magic4) = magic.data.get(0..4) {
#[cfg(feature = "qcow")]
if magic4 == QCOW_MAGIC.to_be_bytes() {
return Ok(ImageType::Qcow2);
}
#[cfg(feature = "android-sparse")]
if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() {
return Ok(ImageType::AndroidSparse);
}
}
Ok(ImageType::Raw)
}
/// Inspect the image file type and create an appropriate disk file to match it.
pub fn create_disk_file(
raw_image: File,
is_sparse_file: bool,
// max_nesting_depth is only used if the composite-disk or qcow features are enabled.
#[allow(unused_variables)] mut max_nesting_depth: u32,
// image_path is only used if the composite-disk feature is enabled.
#[allow(unused_variables)] image_path: &Path,
) -> Result<Box<dyn DiskFile>> {
if max_nesting_depth == 0 {
return Err(Error::MaxNestingDepthExceeded);
}
#[allow(unused_assignments)]
{
max_nesting_depth -= 1;
}
let image_type = detect_image_type(&raw_image)?;
Ok(match image_type {
ImageType::Raw => |
#[cfg(feature = "qcow")]
ImageType::Qcow2 => {
Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?)
as Box<dyn DiskFile>
}
#[cfg(feature = "composite-disk")]
ImageType::CompositeDisk => {
// Valid composite disk header present
Box::new(
CompositeDiskFile::from_file(
raw_image,
is_sparse_file,
max_nesting_depth,
image_path,
)
.map_err(Error::CreateCompositeDisk)?,
) as Box<dyn DiskFile>
}
#[cfg(feature = "android-sparse")]
ImageType::AndroidSparse => {
Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?)
as Box<dyn DiskFile>
}
#[allow(unreachable_patterns)]
_ => return Err(Error::UnknownType),
})
}
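
// Hedged usage sketch (not part of the original source): how a caller might open a
// backing image and build a `DiskFile` from it. The path, sparseness flag, and error
// handling below are illustrative assumptions rather than crosvm's actual call site.
//
//     let image_path = std::path::Path::new("/tmp/guest-disk.img");
//     let raw_image = std::fs::File::open(image_path).expect("failed to open image");
//     let disk = create_disk_file(
//         raw_image,
//         /* is_sparse_file= */ true,
//         MAX_NESTING_DEPTH,
//         image_path,
//     )
//     .expect("failed to create disk file");
//     // The detected format (raw, qcow2, composite, android-sparse) decides which
//     // concrete DiskFile implementation ends up behind the Box.
//     let _len = disk.get_len().expect("failed to query disk length");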
/// An asynchronously accessible disk.
#[async_trait(?Send)]
pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate {
/// Returns the inner file consuming self.
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile>;
/// Asynchronously fsyncs any completed operations to the disk.
async fn fsync(&self) -> Result<()>;
/// Reads from the file at 'file_offset' in to memory `mem` at `mem_offsets`.
/// `mem_offsets` is similar to an iovec except relative to the start of `mem`.
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`.
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize>;
/// Replaces a range of bytes with a hole.
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>;
    /// Writes zeroes to `length` bytes of the file starting at `file_offset`.
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>;
}
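
// Hedged sketch (not in the original file): driving an `AsyncDisk` from an Executor.
// The Executor construction and block-on style below are assumptions about the
// cros_async API, meant only to illustrate the to_async_disk() round trip.
//
//     let ex = Executor::new().expect("failed to create executor");
//     let file = std::fs::File::open("/tmp/guest-disk.img").expect("open failed");
//     let disk: Box<dyn AsyncDisk> = Box::new(file)
//         .to_async_disk(&ex)
//         .expect("to_async_disk failed");
//     ex.run_until(async {
//         disk.fsync().await.expect("fsync failed");
//         disk.punch_hole(0, 4096).await.expect("punch_hole failed");
//     })
//     .expect("executor run failed");
//     // When the block device is torn down, into_inner() recovers the Send-able
//     // DiskFile so it can travel back to the main device thread.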
/// A disk backed by a single file that implements `AsyncDisk` for access.
pub struct SingleFileDisk {
inner: Box<dyn IoSourceExt<File>>,
}
impl SingleFileDisk {
pub fn new(disk: File, ex: &Executor) -> Result<Self> {
ex.async_from(disk)
.map_err(Error::CreateSingleFileDisk)
.map(|inner| SingleFileDisk { inner })
}
}
impl DiskGetLen for SingleFileDisk {
fn get_len(&self) -> io::Result<u64> {
self.inner.as_source().get_len()
}
}
impl FileSetLen for SingleFileDisk {
fn set_len(&self, len: u64) -> io::Result<()> {
self.inner.as_source().set_len(len)
}
}
impl FileAllocate for SingleFileDisk {
fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> {
self.inner.as_source_mut().allocate(offset, len)
}
}
#[async_trait(?Send)]
impl AsyncDisk for SingleFileDisk {
fn into_inner(self: Box<Self>) -> Box<dyn DiskFile> {
Box::new(self.inner.into_source())
}
async fn fsync(&self) -> Result<()> {
self.inner.fsync().await.map_err(Error::Fsync)
}
async fn read_to_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.read_to_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::ReadToMem)
}
async fn write_from_mem<'a>(
&'a self,
file_offset: u64,
mem: Arc<dyn BackingMemory + Send + Sync>,
mem_offsets: &'a [cros_async::MemRegion],
) -> Result<usize> {
self.inner
.write_from_mem(Some(file_offset), mem, mem_offsets)
.await
.map_err(Error::WriteFromMem)
}
async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> {
self.inner
.fallocate(file_offset, length, AllocateMode::PunchHole)
.await
.map_err(Error::Fallocate)
}
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()> {
if self
.inner
.fallocate(file_offset, length, AllocateMode::ZeroRange)
.await
.is_ok()
{
return Ok(());
}
// Fall back to writing zeros if fallocate doesn't work.
let buf_size = min(length, 0x10000);
let mut nwritten = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size) as usize;
let buf = vec![0u8; write_size];
nwritten += self
.inner
.write_from_vec(Some(file_offset + nwritten as u64), buf)
.await
.map(|(n, _)| n as u64)
.map_err(Error::WriteFromVec)?;
}
Ok(())
}
}
gen_functionalization_type.py
from tools.codegen.api import cpp
from tools.codegen.api.types import (
DispatcherSignature, Binding, FunctionalizationLambda, ViewInverseSignature
)
from tools.codegen.api.translate import translate
from tools.codegen.context import with_native_function
from tools.codegen.model import (
Argument, NativeFunction, SchemaKind, BackendIndex,
Tag, FunctionSchema, SelfArgument, TensorOptionsArguments
)
from tools.codegen.selective_build.selector import SelectiveBuilder
from typing import List, Optional, Union, Tuple
from tools.codegen.utils import mapMaybe
def modifies_arguments(f: NativeFunction) -> bool:
return f.func.kind() in [SchemaKind.inplace, SchemaKind.out]
# This function constructs the return statement for the kernels that contain mutations
# It mostly just needs to special case multi-output returns to wrap the result in a tuple
def return_str(f: NativeFunction) -> str:
if len(f.func.arguments.out) != 0:
if len(f.func.arguments.out) > 1:
return_names = ', '.join(a.name for a in f.func.arguments.out)
return f'return {DispatcherSignature.from_schema(f.func).returns_type().cpp_type()}({return_names});'
else:
return f'return {f.func.arguments.out[0].name}'
if f.func.arguments.self_arg is not None:
return f'return {f.func.arguments.self_arg.argument.name}'
return ''
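
# Illustrative note (not in the original file): for an out= op with two outputs this
# produces something roughly like
#   return ::std::tuple<at::Tensor &,at::Tensor &>(out0, out1);
# while for an inplace op such as add_() it produces `return self`. The exact C++
# return type string comes from DispatcherSignature, so the tuple type above is a sketch.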
def wrapper_name(func: FunctionSchema) -> str:
if func.name.overload_name:
return f'{cpp.name(func)}_{func.name.overload_name}'
else:
return cpp.name(func)
def is_tensor_like(a: Union[Argument, TensorOptionsArguments, SelfArgument]) -> bool:
return isinstance(a, SelfArgument) or (isinstance(a, Argument) and a.type.is_tensor_like())
# unwraps all tensor-like arguments, returning:
# (1) a string containing all of the logic that does the unwrapping
# (2) a context, to be used by translate(), with all of the relevant bindings.
def unwrap_tensor_args(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
context: List[Binding] = []
unwrapped_tensor_args: List[str] = []
for arg in sig.arguments():
if is_tensor_like(arg.argument):
# for tensor inputs, we want to unwrap them before passing them into the redispatch calls.
unwrapped_name = f'{arg.name}_'
unwrapped_tensor_args.append(
f'auto {unwrapped_name} = at::functionalization::impl::from_functional_tensor({arg.name});')
context.append(arg.with_name(unwrapped_name))
else:
# for non-tensor inputs, we want to pass them directly into the redispatch calls.
context.append(arg)
unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args)
return unwrap_tensor_args_str, context
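
# Illustrative example (assumption, not emitted verbatim anywhere): for a schema like
# `add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1)` the generated unwrap block
# would look roughly like
#   auto self_ = at::functionalization::impl::from_functional_tensor(self);
#   auto other_ = at::functionalization::impl::from_functional_tensor(other);
# while the non-tensor `alpha` argument is passed through to the redispatch call unchanged.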
# converts all tensor-like arguments to meta tensors, which are used to compute stride info. Returns:
# (1) a string containing all of the logic that does the conversions.
# (2) a context, to be used by translate(), with all of the relevant bindings.
def convert_to_meta_tensors(sig: DispatcherSignature) -> Tuple[str, List[Binding]]:
context: List[Binding] = []
unwrapped_tensor_args: List[str] = []
for arg in sig.arguments():
if is_tensor_like(arg.argument):
            # for tensor inputs, convert them into meta tensors first; the meta call below
            # only needs sizes/strides/dtype, not real data.
a_ = arg.name
unwrapped_name = f'{arg.name}_meta'
unwrapped_tensor_args.append(
f"auto {unwrapped_name} = at::native::empty_strided_meta({a_}.sizes(), {a_}.strides(), \
/*dtype=*/c10::make_optional({a_}.scalar_type()), /*layout=*/c10::make_optional({a_}.layout()), \
/*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);"
)
context.append(arg.with_name(unwrapped_name))
else:
# for non-tensor inputs, we want to pass them directly into the redispatch calls.
context.append(arg)
unwrap_tensor_args_str = '\n '.join(unwrapped_tensor_args)
return unwrap_tensor_args_str, context
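
# Illustrative example (assumption): for a tensor argument `self`, the generated meta
# conversion is a single statement of the form
#   auto self_meta = at::native::empty_strided_meta(self.sizes(), self.strides(), ...);
# i.e. a meta-device tensor with matching size/stride/dtype that the reference
# computation can run on without touching real data.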
# Generates the Functionalization kernel for:
# - ops that create aliases (e.g. transpose())
# - ops that are views AND mutations (e.g. transpose_())
def emit_view_functionalization_body(
f: NativeFunction,
functional_op: NativeFunction
) -> str:
# view op case
assert f.is_view_op
if f.tag is Tag.inplace_view:
# This op is both an inplace op AND a view op.
# See Note [Functionalization Pass - Inplace View Ops] for details.
# I currently have the view meta call into the out-of-place variant of the view, to avoid
# having to define an extra ~20 inplace {view}_inverse_ functions.
        # Most view ops don't have a NativeFunctionsGroup anyway, because we don't define out= variants for view ops.
# I'm assuming that every inplace-view op has a corresponding out-of-place view op,
# with the same name but the trailing underscore removed.
# This is currently asserted at parse time in gen.py (see error_check_native_functions).
assert f.func.kind() is SchemaKind.inplace
# Requirement: Every inplace_view op needs to have a corresponding functional view op, which we paired together beforehand.
assert functional_op is not None
api_name = functional_op.func.name.unambiguous_name()
call_sig = DispatcherSignature.from_schema(functional_op.func)
else:
api_name = f.func.name.unambiguous_name()
call_sig = DispatcherSignature.from_schema(f.func)
dispatcher_sig = DispatcherSignature.from_schema(f.func)
keyset = 'dispatchKeySet & c10::after_func_keyset'
return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type()
unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig)
view_redispatch_args = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, call_sig.arguments(), method=False)]
forward_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=False)
reverse_lambda = FunctionalizationLambda.from_func(f, functional_op=functional_op, is_reverse=True)
# The meta API call should use the same arguments, but convert all tensors to meta tensors first.
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
meta_call_args = [e.expr for e in translate(meta_call_ctx, call_sig.arguments(), method=False)]
if f.tag is Tag.inplace_view:
# See Note [Functionalization Pass - Inplace View Ops] for more details
return f"""
at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
{forward_lambda.decl()} {{
return {forward_lambda.inner_call()}
}},
{reverse_lambda.decl()} {{
return {reverse_lambda.inner_call()}
}}
);
at::functionalization::impl::mutate_view_meta(self, view_meta);
{unwrap_tensor_args_str}
{return_type} reference_tensor_output;
{{
at::AutoDispatchSkipFunctionalize guard;
{meta_conversion_str}
reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)});
}}
// See Note [Propagating strides in the functionalization pass]
at::functionalization::impl::set_sizes_strides_offset(self, reference_tensor_output);
return self;
"""
else:
return f"""
{unwrap_tensor_args_str}
{return_type} tmp_output;
{return_type} reference_tensor_output;
{{
at::AutoDispatchSkipFunctionalize guard;
{meta_conversion_str}
reference_tensor_output = at::_ops::{api_name}::call({', '.join(meta_call_args)});
tmp_output = at::_ops::{api_name}::redispatch({', '.join(view_redispatch_args)});
// I'm fusing the [alias removal], [mutation removal], [add views back] passes together.
// Later, we'll want to turn them into separate passes (since e.g. vulkan only cares about alias removal).
}}
at::functionalization::ViewMeta view_meta = at::functionalization::ViewMeta(
{forward_lambda.decl()} {{
return {forward_lambda.inner_call()}
}},
{reverse_lambda.decl()} {{
return {reverse_lambda.inner_call()}
}}
);
auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, self, view_meta);
// See Note [Propagating strides in the functionalization pass]
at::functionalization::impl::set_sizes_strides_offset(out, reference_tensor_output);
return out;
"""
# Generates the Functionalization kernel for inplace ops
def emit_inplace_functionalization_body(
f: NativeFunction,
functional_op: Optional[NativeFunction]
) -> str:
# mutation case
assert(modifies_arguments(f))
dispatcher_sig = DispatcherSignature.from_schema(f.func)
keyset = 'dispatchKeySet & c10::after_func_keyset'
return_type = dispatcher_sig.returns_type().remove_const_ref().cpp_type()
unwrap_tensor_args_str, unwrapped_args_ctx = unwrap_tensor_args(dispatcher_sig)
maybe_return = '' if len(f.func.returns) == 0 else 'return '
sync_tensor_args = '\n '.join(mapMaybe(
lambda arg: f'at::functionalization::impl::sync({arg.name});'
if arg.type.is_tensor_like() else None,
f.func.arguments.flat_all))
if functional_op is None:
# We can't functionalize this inplace op, since we don't know what the corresponding functional op is.
inplace_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, dispatcher_sig.arguments(), method=False)]
warn_str = "Note: the functionalization pass encountered an operator ({}) that it could not functionalize, \
because it couldn't find an out-of-place equivalent of the operator to call. \
Instead, it's calling the inplace/view operator directly. \
If this causes problems in your program, consider upstreaming the out-of-place op to PyTorch.".format(str(f.func.name))
return f"""
if (c10::impl::tls_local_dispatch_key_set().included_.has(c10::DispatchKey::Functionalize)) {{
TORCH_WARN("{warn_str}");
}}
{sync_tensor_args}
{unwrap_tensor_args_str}
at::AutoDispatchSkipFunctionalize guard;
// Redispatch as normally otherwise, since XLA has its own lowerings for special inplace ops.
{maybe_return}at::_ops::{f.func.name.unambiguous_name()}::redispatch({', '.join(inplace_exprs)});
"""
# call the out-of-place variant of the op
    functional_sig = DispatcherSignature.from_schema(functional_op.func)
    functional_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)]
mutable_input_post_processing = '\n'.join([
f"""
auto {a.name}_functional = at::functionalization::impl::unsafeGetFunctionalWrapper({a.name});
{a.name}_functional->replace_(tmp_output);
{a.name}_functional->commit_update();"""
for a in f.func.arguments.flat_non_out
if a.annotation and a.annotation.is_write and a.type.is_tensor_like()])
return f"""
{sync_tensor_args}
{unwrap_tensor_args_str}
{return_type} tmp_output;
{{
at::AutoDispatchSkipFunctionalize guard;
// The functionalization pass explicitly doesn't pass out= parameters to the redispatch
tmp_output = at::_ops::{functional_op.func.name.unambiguous_name()}::redispatch({', '.join(functional_exprs)});
}}
{mutable_input_post_processing}
{return_str(f)};"""
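
# Rough shape of a generated inplace kernel (hedged illustration only; the real output
# depends on the schema): for `add_(Tensor(a!) self, Tensor other)` the emitted C++
# roughly does
#   at::functionalization::impl::sync(self); at::functionalization::impl::sync(other);
#   auto self_ = at::functionalization::impl::from_functional_tensor(self); auto other_ = ...;
#   at::Tensor tmp_output = at::_ops::add_Tensor::redispatch(ks, self_, other_, ...);
#   auto self_functional = at::functionalization::impl::unsafeGetFunctionalWrapper(self);
#   self_functional->replace_(tmp_output);
#   self_functional->commit_update();
#   return self;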
def emit_declaration_for_noncomposite_views(f: NativeFunction) -> str:
# For every view op, we need a corresponding "inverse view" function.
# This generates the declarations so we get a good compiler error when someone adds a new view.
view_inverse_sig = ViewInverseSignature(f)
return view_inverse_sig.decl()
# The below functions generate RegisterFunctionalization.cpp
# These files provide the kernels that run the functionalization pass, which can be opted into
# per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch).
def gen_functionalization_registration(
selector: SelectiveBuilder,
f: NativeFunction,
composite_implicit_autograd_index: BackendIndex
) -> Optional[str]:
@with_native_function
def emit_registration_helper(f: NativeFunction) -> Optional[str]:
# Note: for now, this logic is meant to avoid registering functionalization kernels for mobile.
        # At some point we'll want Vulkan to use functionalization, and we'll need to change this.
if not selector.include_all_operators:
return None
if not f.is_view_op and not modifies_arguments(f):
return None
if f.is_view_op and f.has_composite_implicit_autograd_kernel:
metadata = composite_implicit_autograd_index.get_kernel(f)
assert metadata is not None
native_api_name = metadata.kernel
sig = DispatcherSignature.from_schema(f.func)
# Note [Composite view ops in the functionalization pass]
            # We don't need to worry about implementing functionalization kernels for views with
# CompositeImplicitAutograd kernels, because we can just decompose them into their base operators.
# We can't just opt the entire Functionalization dispatch key into the composite keyset though,
# because we don't want to decompose non-view ops that are composite, like `at::ones`.
registration_str = f'static_cast<{sig.ptr_type()}>(at::native::{native_api_name})'
else:
registration_str = f'TORCH_FN(functionalization::{wrapper_name(f.func)})'
return f'm.impl("{f.func.name}", {registration_str});'
return emit_registration_helper(f)
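
# Illustrative registrations this can emit (the concrete op names below are assumptions):
#   m.impl("transpose_", TORCH_FN(functionalization::transpose_));        # inplace view op
#   m.impl("narrow", static_cast<...>(at::native::narrow));               # composite view op, reuses its decomposition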
def gen_functionalization_definition(
selector: SelectiveBuilder,
f: NativeFunction,
functional_op: Optional[NativeFunction]
) -> Optional[str]:
@with_native_function
def emit_definition_helper(f: NativeFunction) -> Optional[str]:
if not selector.include_all_operators:
return None
if not f.is_view_op and not modifies_arguments(f):
return None
if f.is_view_op and f.has_composite_implicit_autograd_kernel:
# See Note [Composite view ops in the functionalization pass]
return None
# order is important here, ops that are both views and mutations should hit the view path.
if f.is_view_op:
# Every view op is expected to have a functional counterpart (e.g. transpose_() -> transpose())
assert functional_op is not None
body_str = emit_view_functionalization_body(f, functional_op)
else:
# inplace op
assert modifies_arguments(f)
body_str = emit_inplace_functionalization_body(f, functional_op)
sig = DispatcherSignature.from_schema(f.func)
return f"""
{sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
{body_str}
}}
"""
return emit_definition_helper(f)
# See Note [Functionalization Pass: View Inverses].
@with_native_function
def gen_functionalization_view_inverse_declaration(f: NativeFunction) -> Optional[str]:
# We only need to generate view_inverse declarations for view ops that:
# - aren't composite (since they'll decompose and we'll get them for free).
# - aren't inplace (since they should have a corresponding functional version, which we call instead).
if f.is_view_op and not f.has_composite_implicit_autograd_kernel and not modifies_arguments(f):
output = emit_declaration_for_noncomposite_views(f)
return output
    return None
"""
# call the out-of-place variant of the op
functional_sig = DispatcherSignature.from_schema(functional_op.func)
functional_exprs = [keyset] + [e.expr for e in translate(unwrapped_args_ctx, functional_sig.arguments(), method=False)]
mutable_input_post_processing = '\n'.join([
f"""
auto {a.name}_functional = at::functionalization::impl::unsafeGetFunctionalWrapper({a.name});
{a.name}_functional->replace_(tmp_output);
{a.name}_functional->commit_update();"""
for a in f.func.arguments.flat_non_out
if a.annotation and a.annotation.is_write and a.type.is_tensor_like()])
return f"""
{sync_tensor_args}
{unwrap_tensor_args_str}
{return_type} tmp_output;
{{
at::AutoDispatchSkipFunctionalize guard;
// The functionalization pass explicitly doesn't pass out= parameters to the redispatch
tmp_output = at::_ops::{functional_op.func.name.unambiguous_name()}::redispatch({', '.join(functional_exprs)});
}}
{mutable_input_post_processing}
{return_str(f)};"""
def emit_declaration_for_noncomposite_views(f: NativeFunction) -> str:
# For every view op, we need a corresponding "inverse view" function.
# This generates the declarations so we get a good compiler error when someone adds a new view.
view_inverse_sig = ViewInverseSignature(f)
return view_inverse_sig.decl()
# The below functions generate RegisterFunctionalization.cpp
# These files provide the kernels that run the functionalization pass, which can be opted into
# per backend (e.g. XLA or Vulkan), or as a composable transform (functionalize() in functorch).
def gen_functionalization_registration(
selector: SelectiveBuilder,
f: NativeFunction,
composite_implicit_autograd_index: BackendIndex
) -> Optional[str]:
@with_native_function
def emit_registration_helper(f: NativeFunction) -> Optional[str]:
# Note: for now, this logic is meant to avoid registering functionalization kernels for mobile.
# At some point we'll want Vulkan to use functionalization, and we'll need to change this.
if not selector.include_all_operators:
return None
if not f.is_view_op and not modifies_arguments(f):
|
if f.is_view_op and f.has_composite_implicit_autograd_kernel:
metadata = composite_implicit_autograd_index.get_kernel(f)
assert metadata is not None
native_api_name = metadata.kernel
sig = DispatcherSignature.from_schema(f.func)
# Note [Composite view ops in the functionalization pass]
# We don't need to worry about implementing functionalization kernels for views with
# CompositeImplicitAutograd kernels, because we can just decompose them into their base operators.
# We can't just opt the entire Functionalization dispatch key into the composite keyset though,
# because we don't want to decompose non-view ops that are composite, like `at::ones`.
registration_str = f'static_cast<{sig.ptr_type()}>(at::native::{native_api_name})'
else:
registration_str = f'TORCH_FN(functionalization::{wrapper_name(f.func)})'
return f'm.impl("{f.func.name}", {registration_str});'
return emit_registration_helper(f)
def gen_functionalization_definition(
selector: SelectiveBuilder,
f: NativeFunction,
functional_op: Optional[NativeFunction]
) -> Optional[str]:
@with_native_function
def emit_definition_helper(f: NativeFunction) -> Optional[str]:
if not selector.include_all_operators:
return None
if not f.is_view_op and not modifies_arguments(f):
return None
if f.is_view_op and f.has_composite_implicit_autograd_kernel:
# See Note [Composite view ops in the functionalization pass]
return None
# order is important here, ops that are both views and mutations should hit the view path.
if f.is_view_op:
# Every view op is expected to have a functional counterpart (e.g. transpose_() -> transpose())
assert functional_op is not None
body_str = emit_view_functionalization_body(f, functional_op)
else:
# inplace op
assert modifies_arguments(f)
body_str = emit_inplace_functionalization_body(f, functional_op)
sig = DispatcherSignature.from_schema(f.func)
return f"""
{sig.defn(name=wrapper_name(f.func), is_redispatching_fn=True)} {{
{body_str}
}}
"""
return emit_definition_helper(f)
# See Note [Functionalization Pass: View Inverses].
@with_native_function
def gen_functionalization_view_inverse_declaration(f: NativeFunction) -> Optional[str]:
# We only need to generate view_inverse declarations for view ops that:
# - aren't composite (since they'll decompose and we'll get them for free).
# - aren't inplace (since they should have a corresponding functional version, which we call instead).
if f.is_view_op and not f.has_composite_implicit_autograd_kernel and not modifies_arguments(f):
output = emit_declaration_for_noncomposite_views(f)
return output
return None
| return None | conditional_block |
iterative_gmm.py |
import os, shutil
import itertools
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.io import loadmat
from scipy.ndimage import gaussian_filter1d
from scipy import interpolate
from scipy import linalg
from sklearn.covariance import EmpiricalCovariance, MinCovDet
from sklearn.decomposition import NMF, FastICA, PCA
from sklearn.metrics import homogeneity_score,homogeneity_completeness_v_measure
from sklearn import mixture
from skimage.restoration import inpaint
from scipy.stats import multivariate_normal
from sklearn.cluster import KMeans
import scipy.special  # needed for scipy.special.comb in the 'fraction' mode below
import scipy.ndimage as ndimage
import scipy.spatial as spatial
import scipy.misc as misc
import matplotlib.patches as patches
from matplotlib.patches import Rectangle, Ellipse
class | :
def __init__(self):
self.XY = None
def plot_cov(self,means, covariances,ct):
if ct == 'spherical':
return
color_iter = itertools.cycle(['navy', 'navy', 'cornflowerblue', 'gold',
'darkorange'])
ax =plt.gca()
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
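# (descriptive note: linalg.eigh above returns the covariance eigenvalues/eigenvectors;
#  2*sqrt(2)*sqrt(eigenvalue) turns each eigenvalue into an ellipse axis length, and the leading
#  eigenvector below sets the orientation, similar to the usual sklearn GMM plotting examples.)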
u = w[0] / linalg.norm(w[0])
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
alpha = 0.2
ell = Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
ell = Ellipse(mean, v[0]*4, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ell = Ellipse(mean, v[0]*2, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
# multi_normal = multivariate_normal(mean[0:2],u[0:2])
# ax.contour(np.sort(X[:,0]),np.sort(X[:,1]),
# multi_normal.pdf(self.XY).reshape(X.shape[0],X.shape[0]),
# colors='black',alpha=0.3)
ax.scatter(mean[0],mean[1],c='grey',zorder=10,s=100)
def iterative_gmm(self,dataset = 'bb',fake = True,mode = 'gmm',binary = False,im_dir = './images/',savegif = False,title ='temp',bic_thresh = 0,maxiter = 40,nc =5,v_and_1 = False,thresh = 0.9,cov=[],n_components=2,covt='spherical',ra=False,pca = True):
'''
dataset: string
The filename of the material, e.g. 'bb' or 'pp'
fake: bool
Whether or not the data is fake; if it is not, the data will be cropped
mode: str
'fraction' will reduce the input to a combination of the relative signals,
e.g. (bin1 - bin0) / sum
binary: bool
Whether or not to show the output as binary
nc: int
number of PCA components
'''
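# Hypothetical usage sketch (argument names come from the signature above; the .mat files read by
# loadmat() below, 'all<dataset>' and '2<dataset>_mask', are assumed to be present on disk):
#   I_gmm().iterative_gmm(dataset='bb', mode='gmm', n_components=2, covt='full', maxiter=40)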
# Clear the imagedir
if savegif:
folder = im_dir
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
if ra:
arrowsU = []
arrowsV = []
bic0 = np.infty
itern = 0
inds = [2,3,4]
label_true = loadmat('2'+dataset+'_mask')['BW']
X1 = loadmat('all'+dataset)['Z'][:,inds]
if not fake:
X1 = X1[400:,:].copy()
label_true = label_true[400:].copy()
length = 48
else:
length = 68
# This is code for just looking at the ratio of the bins
if mode == 'fraction':
# initialize vector for fraction
X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))])
result = [x for x in itertools.combinations(np.arange(5),2)]
for jj in range(0,X2.shape[1]):
r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F")
X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ind = np.argsort(np.mean(X1,0))
# X1 = X2[:,ind[:4]]
else:
for jj in range(0,X1.shape[1]):
r2 = np.reshape(X1[:,jj], (20,length), order="F")
X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ct = 'full'
if mode == 'bgmm':
bgmm = mixture.BayesianGaussianMixture(
n_components=n_components, covariance_type=covt)
elif mode == 'kmeans':
km = KMeans(n_clusters=n_components)
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=covt)
gmm1 = mixture.GaussianMixture(n_components=n_components,
covariance_type='full')
ims = []
pca = True
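# (note: the assignment just above hard-codes pca to True, so the PCA projection below always runs
#  regardless of the `pca` argument)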
# Do the PCA decomposition
if pca:
X1 = PCA(n_components=nc).fit_transform(X1)
X3 = X1[:,0:2].copy()
fig = plt.figure(figsize=(10,10))
bics = []
for ii in range(0,maxiter):
X = X1.copy()
if mode == 'gmm':
y_pred = gmm.fit_predict(X)
elif mode == 'bgmm':
y_pred = bgmm.fit_predict(X)
y_ff = gmm.fit(X)
elif mode == 'kmeans':
y_pred = km.fit_predict(X)
y_ff = gmm.fit(X)
y_ff1 = gmm1.fit(X)
# if I should show the vmeasure
if v_and_1:
homo1,comp1,vs1 = homogeneity_completeness_v_measure(
label_true.squeeze(), y_pred)
bic = gmm.aic(X)
bic1 = gmm1.aic(X)
print(vs1,itern,bic,bic1)
else:
bic = gmm.aic(X)
print(bic)
# Keep iterating while the BIC improves; otherwise stop
if bic - bic0 < bic_thresh:
bic0 = bic
else:
print('BIC got higher')
break
print(bic)
# map the bad values to zero
for kk in range(n_components):
temp = X[y_pred == kk,:]
if cov == 'robust':
robust_cov = MinCovDet().fit(temp)
else:
robust_cov = EmpiricalCovariance().fit(temp)
# Calculating the mahal distances
robust_mahal = robust_cov.mahalanobis(
temp - robust_cov.location_) ** (0.33)
if thresh < 1:
temp[robust_mahal > robust_mahal.max()*thresh] = 0
else:
# import pdb; pdb.set_trace()
temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0
X[y_pred == kk,:] = temp
mask_one = X[:,1] == 0
if y_pred[3] == 0:
# Map top to zero if it is the wrong combo
y_pred = y_pred + 1
y_pred[y_pred == n_components] = 0
m_reshape = np.reshape(mask_one, (20,length), order="F")
if itern == 0:
y_0 = y_pred
# Plotting functions
ax0 = fig.add_subplot(111)
a = -(y_pred - label_true.squeeze())
y_reshape = np.reshape(a, (20,length), order="F")
colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
ax0.set_title('New Method')
self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full')
if itern == 0:
axes = plt.gca()
ylim = axes.get_ylim()
xlim = axes.get_xlim()
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
ax2 = plt.axes([.25, .55, .6, .4], facecolor='y')
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
plt.title('Image Space')
plt.xticks([])
plt.yticks([])
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
i, j = np.where(m_reshape == True)
# if binary:
# plt.imshow(y_reshape,cmap='brg')
# else:
plt.scatter(j,i,marker='x',c='k')
# import pdb; pdb.set_trace()
d = [i for i, x in enumerate(mask_one) if x]
ax0.scatter(X1[d,0],X1[d,1],marker='x',c='k')
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
X2 = X1.copy()
# Inpainting the zeros
r2 = np.reshape(X1, (20,length,X1.shape[1]), order="F")
X1 = np.reshape(inpaint.inpaint_biharmonic(
r2,m_reshape,multichannel=True),
(20*length,X1.shape[1]),order="F")
ax0.plot([X2[d,0],X1[d,0]],[X2[d,1],X1[d,1]],'r')
if ra:
arrowsU.append([X2[d,0],X1[d,0]])
arrowsV.append([X2[d,1],X1[d,1]])
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.clf()
# X_old = X.copy()
# np.save('bb',y_reshape)
# plt.figure()
# robust_mahal1.sort()
# plt.plot(robust_mahal1)
# plt.plot(250,robust_mahal1.max()*.87,'r*')
# plt.savefig('./images2/{}.png'.format(format(itern, "02")))
# plt.figure()
# robust_mahal2.sort()
# plt.plot(robust_mahal2)
# plt.plot(250,robust_mahal2.max()*.87,'r*')
# plt.savefig('./images3/{}.png'.format(format(itern, "02")))
itern += 1
fig = plt.figure(figsize=(10,10))
ax0 = fig.add_subplot(111)
colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
self.plot_cov(gmm1.means_, gmm1.covariances_,ct)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
ax0.set_title('New Method')
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
r = np.reshape(y_pred, (20,length), order="F")
if binary:
self.animate(y_reshape)
else:
self.animate(y_reshape, im=np.reshape(X1[:,0], (20,length), order="F"))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.figure()
plt.imshow(r)
plt.xticks([])
plt.yticks([])
plt.figure()
r0 = np.reshape(y_0, (20,length), order="F")
plt.imshow(r - r0)
plt.xticks([])
plt.yticks([])
if savegif:
# save gif
files = os.listdir('./images')
images = []
for filename in files:
images.append(imageio.imread('./images/'+filename))
imageio.mimsave(title + '.mp4', images,fps=1)
imageio.mimsave(title + '.gif', images)
if ra:
return arrowsU,arrowsV
def find_paws(self,data, smooth_radius = 1, threshold = 0.0001):
# https://stackoverflow.com/questions/4087919/how-can-i-improve-my-paw-detection
"""Detects and isolates contiguous regions in the input array"""
# Blur the input data a bit so the paws have a continuous footprint
data = ndimage.uniform_filter(data, smooth_radius)
# Threshold the blurred data (this needs to be a bit > 0 due to the blur)
thresh = data > threshold
# Fill any interior holes in the paws to get cleaner regions...
filled = ndimage.morphology.binary_fill_holes(thresh)
# Label each contiguous paw
coded_paws, num_paws = ndimage.label(filled)
# Isolate the extent of each paw
# find_objects returns a list of 2-tuples: (slice(...), slice(...))
# which represents a rectangular box around the object
data_slices = ndimage.find_objects(coded_paws)
return data_slices
def animate(self,frame,im = None):
"""Detects paws and animates the position and raw data of each frame
in the input file"""
# With matplotlib, it's much, much faster to just update the properties
# of a display object than it is to create a new one, so we'll just update
# the data and position of the same objects throughout this animation...
# Since we're making an animation with matplotlib, we need
# ion() instead of show()...
fig = plt.gcf()
ax = plt.axes([.25, .55, .6, .4], facecolor='y')
plt.axis('off')
# Make an image based on the first frame that we'll update later
# (The first frame is never actually displayed)
if im is None:
plt.imshow(frame,cmap='brg')
else:
plt.imshow(im)
plt.title('Image Space')
# Make 4 rectangles that we can later move to the position of each paw
rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]
[ax.add_patch(rect) for rect in rects]
# Process and display each frame
paw_slices = self.find_paws(frame)
# Hide any rectangles that might be visible
[rect.set_visible(False) for rect in rects]
# Set the position and size of a rectangle for each paw and display it
for slice, rect in zip(paw_slices, rects):
dy, dx = slice
rect.set_xy((dx.start, dy.start))
rect.set_width(dx.stop - dx.start + 1)
rect.set_height(dy.stop - dy.start + 1)
rect.set_visible(True)
| I_gmm | identifier_name |
iterative_gmm.py |
import os, shutil
import itertools
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.io import loadmat
from scipy.ndimage import gaussian_filter1d
from scipy import interpolate
from scipy import linalg
from sklearn.covariance import EmpiricalCovariance, MinCovDet
from sklearn.decomposition import NMF, FastICA, PCA
from sklearn.metrics import homogeneity_score,homogeneity_completeness_v_measure
from sklearn import mixture
from skimage.restoration import inpaint
from scipy.stats import multivariate_normal
from sklearn.cluster import KMeans
import scipy.special  # needed for scipy.special.comb in the 'fraction' mode below
import scipy.ndimage as ndimage
import scipy.spatial as spatial
import scipy.misc as misc
import matplotlib.patches as patches
from matplotlib.patches import Rectangle, Ellipse
class I_gmm:
def __init__(self):
self.XY = None
def plot_cov(self,means, covariances,ct):
if ct == 'spherical':
return
color_iter = itertools.cycle(['navy', 'navy', 'cornflowerblue', 'gold',
'darkorange'])
ax =plt.gca()
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
alpha = 0.2
ell = Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
ell = Ellipse(mean, v[0]*4, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ell = Ellipse(mean, v[0]*2, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
# multi_normal = multivariate_normal(mean[0:2],u[0:2])
# ax.contour(np.sort(X[:,0]),np.sort(X[:,1]),
# multi_normal.pdf(self.XY).reshape(X.shape[0],X.shape[0]),
# colors='black',alpha=0.3)
ax.scatter(mean[0],mean[1],c='grey',zorder=10,s=100)
def iterative_gmm(self,dataset = 'bb',fake = True,mode = 'gmm',binary = False,im_dir = './images/',savegif = False,title ='temp',bic_thresh = 0,maxiter = 40,nc =5,v_and_1 = False,thresh = 0.9,cov=[],n_components=2,covt='spherical',ra=False,pca = True):
'''
dataset: string
The filename of the material, e.g. 'bb' or 'pp'
fake: bool
Whether or not the data is fake; if it is not, the data will be cropped
mode: str
'fraction' will reduce the input to a combination of the relative signals,
e.g. (bin1 - bin0) / sum
binary: bool
Whether or not to show the output as binary
nc: int
number of PCA components
'''
# Clear the imagedir
if savegif:
folder = im_dir
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
if ra:
arrowsU = []
arrowsV = []
bic0 = np.infty
itern = 0
inds = [2,3,4]
label_true = loadmat('2'+dataset+'_mask')['BW']
X1 = loadmat('all'+dataset)['Z'][:,inds]
if not fake:
X1 = X1[400:,:].copy()
label_true = label_true[400:].copy()
length = 48
else:
length = 68
# This is code for just looking at the ratio of the bins
if mode == 'fraction':
# initialize vector for fraction
X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))])
result = [x for x in itertools.combinations(np.arange(5),2)]
for jj in range(0,X2.shape[1]):
r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F")
X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ind = np.argsort(np.mean(X1,0))
# X1 = X2[:,ind[:4]]
else:
for jj in range(0,X1.shape[1]):
r2 = np.reshape(X1[:,jj], (20,length), order="F")
X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ct = 'full'
if mode == 'bgmm':
bgmm = mixture.BayesianGaussianMixture(
n_components=n_components, covariance_type=covt)
elif mode == 'kmeans':
km = KMeans(n_clusters=n_components)
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=covt)
gmm1 = mixture.GaussianMixture(n_components=n_components,
covariance_type='full')
ims = []
pca = True
# Do the PCA decomposition
if pca:
X1 = PCA(n_components=nc).fit_transform(X1)
X3 = X1[:,0:2].copy()
fig = plt.figure(figsize=(10,10))
bics = []
for ii in range(0,maxiter):
X = X1.copy()
if mode == 'gmm':
y_pred = gmm.fit_predict(X)
elif mode == 'bgmm':
y_pred = bgmm.fit_predict(X)
y_ff = gmm.fit(X)
elif mode == 'kmeans':
y_pred = km.fit_predict(X)
y_ff = gmm.fit(X)
y_ff1 = gmm1.fit(X)
# if I should show the vmeasure
if v_and_1:
homo1,comp1,vs1 = homogeneity_completeness_v_measure(
label_true.squeeze(), y_pred)
bic = gmm.aic(X)
bic1 = gmm1.aic(X)
print(vs1,itern,bic,bic1)
else:
bic = gmm.aic(X)
print(bic)
# Keep iterating while the BIC improves; otherwise stop
if bic - bic0 < bic_thresh:
bic0 = bic
else:
print('BIC got higher')
break
print(bic)
# map the bad values to zero
for kk in range(n_components):
temp = X[y_pred == kk,:]
if cov == 'robust':
robust_cov = MinCovDet().fit(temp)
else:
robust_cov = EmpiricalCovariance().fit(temp)
# Calculating the mahal distances
robust_mahal = robust_cov.mahalanobis(
temp - robust_cov.location_) ** (0.33)
if thresh < 1:
temp[robust_mahal > robust_mahal.max()*thresh] = 0
else:
# import pdb; pdb.set_trace()
temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0
X[y_pred == kk,:] = temp
mask_one = X[:,1] == 0
if y_pred[3] == 0:
# Map top to zero if it is the wrong combo
y_pred = y_pred + 1
y_pred[y_pred == n_components] = 0
m_reshape = np.reshape(mask_one, (20,length), order="F")
if itern == 0:
y_0 = y_pred
# Plotting functions
ax0 = fig.add_subplot(111)
a = -(y_pred - label_true.squeeze())
y_reshape = np.reshape(a, (20,length), order="F")
colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
ax0.set_title('New Method')
self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full')
if itern == 0:
axes = plt.gca()
ylim = axes.get_ylim()
xlim = axes.get_xlim()
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
ax2 = plt.axes([.25, .55, .6, .4], facecolor='y')
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
plt.title('Image Space')
plt.xticks([])
plt.yticks([])
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
i, j = np.where(m_reshape == True)
# if binary:
# plt.imshow(y_reshape,cmap='brg')
# else:
plt.scatter(j,i,marker='x',c='k')
# import pdb; pdb.set_trace()
d = [i for i, x in enumerate(mask_one) if x]
ax0.scatter(X1[d,0],X1[d,1],marker='x',c='k')
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
X2 = X1.copy()
# Inpainting the zeros
r2 = np.reshape(X1, (20,length,X1.shape[1]), order="F")
X1 = np.reshape(inpaint.inpaint_biharmonic(
r2,m_reshape,multichannel=True),
(20*length,X1.shape[1]),order="F")
ax0.plot([X2[d,0],X1[d,0]],[X2[d,1],X1[d,1]],'r')
if ra:
arrowsU.append([X2[d,0],X1[d,0]])
arrowsV.append([X2[d,1],X1[d,1]])
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.clf()
# X_old = X.copy()
# np.save('bb',y_reshape)
# plt.figure()
# robust_mahal1.sort()
# plt.plot(robust_mahal1)
# plt.plot(250,robust_mahal1.max()*.87,'r*')
# plt.savefig('./images2/{}.png'.format(format(itern, "02")))
# plt.figure()
# robust_mahal2.sort()
# plt.plot(robust_mahal2)
# plt.plot(250,robust_mahal2.max()*.87,'r*')
# plt.savefig('./images3/{}.png'.format(format(itern, "02")))
itern += 1
fig = plt.figure(figsize=(10,10))
ax0 = fig.add_subplot(111)
colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
|
self.plot_cov(gmm1.means_, gmm1.covariances_,ct)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
ax0.set_title('New Method')
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
r = np.reshape(y_pred, (20,length), order="F")
if binary:
self.animate(y_reshape)
else:
self.animate(y_reshape, im=np.reshape(X1[:,0], (20,length), order="F"))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.figure()
plt.imshow(r)
plt.xticks([])
plt.yticks([])
plt.figure()
r0 = np.reshape(y_0, (20,length), order="F")
plt.imshow(r - r0)
plt.xticks([])
plt.yticks([])
if savegif:
# save gif
files = os.listdir('./images')
images = []
for filename in files:
images.append(imageio.imread('./images/'+filename))
imageio.mimsave(title + '.mp4', images,fps=1)
imageio.mimsave(title + '.gif', images)
if ra:
return arrowsU,arrowsV
def find_paws(self,data, smooth_radius = 1, threshold = 0.0001):
# https://stackoverflow.com/questions/4087919/how-can-i-improve-my-paw-detection
"""Detects and isolates contiguous regions in the input array"""
# Blur the input data a bit so the paws have a continuous footprint
data = ndimage.uniform_filter(data, smooth_radius)
# Threshold the blurred data (this needs to be a bit > 0 due to the blur)
thresh = data > threshold
# Fill any interior holes in the paws to get cleaner regions...
filled = ndimage.morphology.binary_fill_holes(thresh)
# Label each contiguous paw
coded_paws, num_paws = ndimage.label(filled)
# Isolate the extent of each paw
# find_objects returns a list of 2-tuples: (slice(...), slice(...))
# which represents a rectangular box around the object
data_slices = ndimage.find_objects(coded_paws)
return data_slices
def animate(self,frame,im = None):
"""Detects paws and animates the position and raw data of each frame
in the input file"""
# With matplotlib, it's much, much faster to just update the properties
# of a display object than it is to create a new one, so we'll just update
# the data and position of the same objects throughout this animation...
# Since we're making an animation with matplotlib, we need
# ion() instead of show()...
fig = plt.gcf()
ax = plt.axes([.25, .55, .6, .4], facecolor='y')
plt.axis('off')
# Make an image based on the first frame that we'll update later
# (The first frame is never actually displayed)
if im is None:
plt.imshow(frame,cmap='brg')
else:
plt.imshow(im)
plt.title('Image Space')
# Make 4 rectangles that we can later move to the position of each paw
rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]
[ax.add_patch(rect) for rect in rects]
# Process and display each frame
paw_slices = self.find_paws(frame)
# Hide any rectangles that might be visible
[rect.set_visible(False) for rect in rects]
# Set the position and size of a rectangle for each paw and display it
for slice, rect in zip(paw_slices, rects):
dy, dx = slice
rect.set_xy((dx.start, dy.start))
rect.set_width(dx.stop - dx.start + 1)
rect.set_height(dy.stop - dy.start + 1)
rect.set_visible(True)
| print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())]) | conditional_block |
iterative_gmm.py |
import os, shutil
import itertools
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.io import loadmat
from scipy.ndimage import gaussian_filter1d
from scipy import interpolate
from scipy import linalg
from sklearn.covariance import EmpiricalCovariance, MinCovDet
from sklearn.decomposition import NMF, FastICA, PCA
from sklearn.metrics import homogeneity_score,homogeneity_completeness_v_measure
from sklearn import mixture
from skimage.restoration import inpaint
from scipy.stats import multivariate_normal
from sklearn.cluster import KMeans
import scipy.special  # needed for scipy.special.comb in the 'fraction' mode below
import scipy.ndimage as ndimage
import scipy.spatial as spatial
import scipy.misc as misc
import matplotlib.patches as patches
from matplotlib.patches import Rectangle, Ellipse
class I_gmm:
def __init__(self):
self.XY = None
def plot_cov(self,means, covariances,ct):
|
def iterative_gmm(self,dataset = 'bb',fake = True,mode = 'gmm',binary = False,im_dir = './images/',savegif = False,title ='temp',bic_thresh = 0,maxiter = 40,nc =5,v_and_1 = False,thresh = 0.9,cov=[],n_components=2,covt='spherical',ra=False,pca = True):
'''
dataset: string
The filename of the material, e.g. 'bb' or 'pp'
fake: bool
Whether or not the data is fake; if it is not, the data will be cropped
mode: str
'fraction' will reduce the input to a combination of the relative signals,
e.g. (bin1 - bin0) / sum
binary: bool
Whether or not to show the output as binary
nc: int
number of PCA components
'''
# Clear the imagedir
if savegif:
folder = im_dir
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
if ra:
arrowsU = []
arrowsV = []
bic0 = np.infty
itern = 0
inds = [2,3,4]
label_true = loadmat('2'+dataset+'_mask')['BW']
X1 = loadmat('all'+dataset)['Z'][:,inds]
if not fake:
X1 = X1[400:,:].copy()
label_true = label_true[400:].copy()
length = 48
else:
length = 68
# This is code for just looking at the ratio of the bins
if mode == 'fraction':
# initialize vector for fraction
X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))])
result = [x for x in itertools.combinations(np.arange(5),2)]
for jj in range(0,X2.shape[1]):
r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F")
X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ind = np.argsort(np.mean(X1,0))
# X1 = X2[:,ind[:4]]
else:
for jj in range(0,X1.shape[1]):
r2 = np.reshape(X1[:,jj], (20,length), order="F")
X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ct = 'full'
if mode == 'bgmm':
bgmm = mixture.BayesianGaussianMixture(
n_components=n_components, covariance_type=covt)
elif mode == 'kmeans':
km = KMeans(n_clusters=n_components)
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=covt)
gmm1 = mixture.GaussianMixture(n_components=n_components,
covariance_type='full')
ims = []
pca = True
# Do the PCA decomposition
if pca:
X1 = PCA(n_components=nc).fit_transform(X1)
X3 = X1[:,0:2].copy()
fig = plt.figure(figsize=(10,10))
bics = []
for ii in range(0,maxiter):
X = X1.copy()
if mode == 'gmm':
y_pred = gmm.fit_predict(X)
elif mode == 'bgmm':
y_pred = bgmm.fit_predict(X)
y_ff = gmm.fit(X)
elif mode == 'kmeans':
y_pred = km.fit_predict(X)
y_ff = gmm.fit(X)
y_ff1 = gmm1.fit(X)
# if I should show the vmeasure
if v_and_1:
homo1,comp1,vs1 = homogeneity_completeness_v_measure(
label_true.squeeze(), y_pred)
bic = gmm.aic(X)
bic1 = gmm1.aic(X)
print(vs1,itern,bic,bic1)
else:
bic = gmm.aic(X)
print(bic)
# Keep iterating while the BIC improves; otherwise stop
if bic - bic0 < bic_thresh:
bic0 = bic
else:
print('BIC got higher')
break
print(bic)
# map the bad values to zero
for kk in range(n_components):
temp = X[y_pred == kk,:]
if cov == 'robust':
robust_cov = MinCovDet().fit(temp)
else:
robust_cov = EmpiricalCovariance().fit(temp)
# Calculating the mahal distances
robust_mahal = robust_cov.mahalanobis(
temp - robust_cov.location_) ** (0.33)
if thresh < 1:
temp[robust_mahal > robust_mahal.max()*thresh] = 0
else:
# import pdb; pdb.set_trace()
temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0
X[y_pred == kk,:] = temp
mask_one = X[:,1] == 0
if y_pred[3] == 0:
# Map top to zero if it is the wrong combo
y_pred = y_pred + 1
y_pred[y_pred == n_components] = 0
m_reshape = np.reshape(mask_one, (20,length), order="F")
if itern == 0:
y_0 = y_pred
# Plotting functions
ax0 = fig.add_subplot(111)
a = -(y_pred - label_true.squeeze())
y_reshape = np.reshape(a, (20,length), order="F")
colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
ax0.set_title('New Method')
self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full')
if itern == 0:
axes = plt.gca()
ylim = axes.get_ylim()
xlim = axes.get_xlim()
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
ax2 = plt.axes([.25, .55, .6, .4], facecolor='y')
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
plt.title('Image Space')
plt.xticks([])
plt.yticks([])
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
i, j = np.where(m_reshape == True)
# if binary:
# plt.imshow(y_reshape,cmap='brg')
# else:
plt.scatter(j,i,marker='x',c='k')
# import pdb; pdb.set_trace()
d = [i for i, x in enumerate(mask_one) if x]
ax0.scatter(X1[d,0],X1[d,1],marker='x',c='k')
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
X2 = X1.copy()
# Inpainting the zeros
r2 = np.reshape(X1, (20,length,X1.shape[1]), order="F")
X1 = np.reshape(inpaint.inpaint_biharmonic(
r2,m_reshape,multichannel=True),
(20*length,X1.shape[1]),order="F")
ax0.plot([X2[d,0],X1[d,0]],[X2[d,1],X1[d,1]],'r')
if ra:
arrowsU.append([X2[d,0],X1[d,0]])
arrowsV.append([X2[d,1],X1[d,1]])
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.clf()
# X_old = X.copy()
# np.save('bb',y_reshape)
# plt.figure()
# robust_mahal1.sort()
# plt.plot(robust_mahal1)
# plt.plot(250,robust_mahal1.max()*.87,'r*')
# plt.savefig('./images2/{}.png'.format(format(itern, "02")))
# plt.figure()
# robust_mahal2.sort()
# plt.plot(robust_mahal2)
# plt.plot(250,robust_mahal2.max()*.87,'r*')
# plt.savefig('./images3/{}.png'.format(format(itern, "02")))
itern += 1
fig = plt.figure(figsize=(10,10))
ax0 = fig.add_subplot(111)
colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
self.plot_cov(gmm1.means_, gmm1.covariances_,ct)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
ax0.set_title('New Method')
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
r = np.reshape(y_pred, (20,length), order="F")
if binary:
self.animate(y_reshape)
else:
self.animate(y_reshape, im=np.reshape(X1[:,0], (20,length), order="F"))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.figure()
plt.imshow(r)
plt.xticks([])
plt.yticks([])
plt.figure()
r0 = np.reshape(y_0, (20,length), order="F")
plt.imshow(r - r0)
plt.xticks([])
plt.yticks([])
if savegif:
# save gif
files = os.listdir('./images')
images = []
for filename in files:
images.append(imageio.imread('./images/'+filename))
imageio.mimsave(title + '.mp4', images,fps=1)
imageio.mimsave(title + '.gif', images)
if ra:
return arrowsU,arrowsV
def find_paws(self,data, smooth_radius = 1, threshold = 0.0001):
# https://stackoverflow.com/questions/4087919/how-can-i-improve-my-paw-detection
"""Detects and isolates contiguous regions in the input array"""
# Blur the input data a bit so the paws have a continuous footprint
data = ndimage.uniform_filter(data, smooth_radius)
# Threshold the blurred data (this needs to be a bit > 0 due to the blur)
thresh = data > threshold
# Fill any interior holes in the paws to get cleaner regions...
filled = ndimage.morphology.binary_fill_holes(thresh)
# Label each contiguous paw
coded_paws, num_paws = ndimage.label(filled)
# Isolate the extent of each paw
# find_objects returns a list of 2-tuples: (slice(...), slice(...))
# which represents a rectangular box around the object
data_slices = ndimage.find_objects(coded_paws)
return data_slices
def animate(self,frame,im = None):
"""Detects paws and animates the position and raw data of each frame
in the input file"""
# With matplotlib, it's much, much faster to just update the properties
# of a display object than it is to create a new one, so we'll just update
# the data and position of the same objects throughout this animation...
# Since we're making an animation with matplotlib, we need
# ion() instead of show()...
fig = plt.gcf()
ax = plt.axes([.25, .55, .6, .4], facecolor='y')
plt.axis('off')
# Make an image based on the first frame that we'll update later
# (The first frame is never actually displayed)
if im is None:
plt.imshow(frame,cmap='brg')
else:
plt.imshow(im)
plt.title('Image Space')
# Make 4 rectangles that we can later move to the position of each paw
rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]
[ax.add_patch(rect) for rect in rects]
# Process and display each frame
paw_slices = self.find_paws(frame)
# Hide any rectangles that might be visible
[rect.set_visible(False) for rect in rects]
# Set the position and size of a rectangle for each paw and display it
for slice, rect in zip(paw_slices, rects):
dy, dx = slice
rect.set_xy((dx.start, dy.start))
rect.set_width(dx.stop - dx.start + 1)
rect.set_height(dy.stop - dy.start + 1)
rect.set_visible(True)
| if ct == 'spherical':
return
color_iter = itertools.cycle(['navy', 'navy', 'cornflowerblue', 'gold',
'darkorange'])
ax =plt.gca()
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
alpha = 0.2
ell = Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
ell = Ellipse(mean, v[0]*4, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ell = Ellipse(mean, v[0]*2, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
# multi_normal = multivariate_normal(mean[0:2],u[0:2])
# ax.contour(np.sort(X[:,0]),np.sort(X[:,1]),
# multi_normal.pdf(self.XY).reshape(X.shape[0],X.shape[0]),
# colors='black',alpha=0.3)
ax.scatter(mean[0],mean[1],c='grey',zorder=10,s=100) | identifier_body |
iterative_gmm.py | import os, shutil
import itertools
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.io import loadmat
from scipy.ndimage import gaussian_filter1d
from scipy import interpolate
from scipy import linalg
from sklearn.covariance import EmpiricalCovariance, MinCovDet
from sklearn.decomposition import NMF, FastICA, PCA
from sklearn.metrics import homogeneity_score,homogeneity_completeness_v_measure
from sklearn import mixture
from skimage.restoration import inpaint
from scipy.stats import multivariate_normal
from sklearn.cluster import KMeans
import scipy.special  # needed for scipy.special.comb in the 'fraction' mode below
import scipy.ndimage as ndimage
import scipy.spatial as spatial
import scipy.misc as misc
import matplotlib.patches as patches
from matplotlib.patches import Rectangle, Ellipse
class I_gmm:
def __init__(self):
self.XY = None
def plot_cov(self,means, covariances,ct):
if ct == 'spherical':
return
color_iter = itertools.cycle(['navy', 'navy', 'cornflowerblue', 'gold',
'darkorange'])
ax =plt.gca()
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
alpha = 0.2
ell = Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
ell = Ellipse(mean, v[0]*4, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ell = Ellipse(mean, v[0]*2, v[1]*2, 180. + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(alpha)
ax.add_artist(ell)
# multi_normal = multivariate_normal(mean[0:2],u[0:2])
# ax.contour(np.sort(X[:,0]),np.sort(X[:,1]),
# multi_normal.pdf(self.XY).reshape(X.shape[0],X.shape[0]),
# colors='black',alpha=0.3)
ax.scatter(mean[0],mean[1],c='grey',zorder=10,s=100)
def iterative_gmm(self,dataset = 'bb',fake = True,mode = 'gmm',binary = False,im_dir = './images/',savegif = False,title ='temp',bic_thresh = 0,maxiter = 40,nc =5,v_and_1 = False,thresh = 0.9,cov=[],n_components=2,covt='spherical',ra=False,pca = True):
'''
dataset: string
The filename of the material, e.g. 'bb' or 'pp'
fake: bool
Whether or not the data is fake; if it is not, the data will be cropped
mode: str
'fraction' will reduce the input to a combination of the relative signals,
e.g. (bin1 - bin0) / sum
binary: bool
Whether or not to show the output as binary
nc: int
number of PCA components
'''
# Clear the imagedir
if savegif:
folder = im_dir
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
#elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
if ra:
arrowsU = []
arrowsV = []
bic0 = np.infty
itern = 0
inds = [2,3,4]
label_true = loadmat('2'+dataset+'_mask')['BW']
X1 = loadmat('all'+dataset)['Z'][:,inds]
if not fake:
X1 = X1[400:,:].copy()
label_true = label_true[400:].copy()
length = 48
else:
length = 68
# This is code for just looking at the ratio of the bins
if mode == 'fraction':
# initialize vector for fraction
X2 = np.zeros([X1.shape[0],int(scipy.special.comb(5,2))])
result = [x for x in itertools.combinations(np.arange(5),2)]
for jj in range(0,X2.shape[1]):
r2 = np.reshape(abs(X1[:,result[jj][0]] - X1[:,result[jj][1]])/abs(X1[:,result[jj][0]] + X1[:,result[jj][1]]), (20,length), order="F")
X2[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ind = np.argsort(np.mean(X1,0))
# X1 = X2[:,ind[:4]]
else:
for jj in range(0,X1.shape[1]):
r2 = np.reshape(X1[:,jj], (20,length), order="F")
X1[:,jj] = np.reshape(gaussian_filter1d(r2,sigma=1),20*length,order="F")
ct = 'full'
if mode == 'bgmm':
bgmm = mixture.BayesianGaussianMixture(
n_components=n_components, covariance_type=covt)
elif mode == 'kmeans':
km = KMeans(n_clusters=n_components)
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=covt)
gmm1 = mixture.GaussianMixture(n_components=n_components,
covariance_type='full')
ims = []
pca = True
# Do the PCA decomposition
if pca:
X1 = PCA(n_components=nc).fit_transform(X1)
X3 = X1[:,0:2].copy()
fig = plt.figure(figsize=(10,10))
bics = []
for ii in range(0,maxiter):
X = X1.copy()
if mode == 'gmm':
y_pred = gmm.fit_predict(X)
elif mode == 'bgmm':
y_pred = bgmm.fit_predict(X)
y_ff = gmm.fit(X)
elif mode == 'kmeans':
y_pred = km.fit_predict(X)
y_ff = gmm.fit(X)
y_ff1 = gmm1.fit(X)
# if I should show the vmeasure
if v_and_1:
homo1,comp1,vs1 = homogeneity_completeness_v_measure(
label_true.squeeze(), y_pred)
bic = gmm.aic(X)
bic1 = gmm1.aic(X)
print(vs1,itern,bic,bic1)
else:
bic = gmm.aic(X)
print(bic)
# Keep iterating while the BIC improves; otherwise stop
if bic - bic0 < bic_thresh:
bic0 = bic
else:
print('BIC got higher')
break
print(bic)
# map the bad values to zero
for kk in range(n_components):
temp = X[y_pred == kk,:]
if cov == 'robust':
robust_cov = MinCovDet().fit(temp)
else:
robust_cov = EmpiricalCovariance().fit(temp)
# Calculating the mahal distances
robust_mahal = robust_cov.mahalanobis(
temp - robust_cov.location_) ** (0.33)
if thresh < 1:
temp[robust_mahal > robust_mahal.max()*thresh] = 0
else:
# import pdb; pdb.set_trace()
temp[robust_mahal > np.sort(robust_mahal)[-thresh]] = 0
X[y_pred == kk,:] = temp
mask_one = X[:,1] == 0
if y_pred[3] == 0:
# Map top to zero if it is the wrong combo
y_pred = y_pred + 1
y_pred[y_pred == n_components] = 0
m_reshape = np.reshape(mask_one, (20,length), order="F")
if itern == 0:
y_0 = y_pred
# Plotting functions
ax0 = fig.add_subplot(111) | colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
ax0.set_title('New Method')
self.plot_cov(gmm1.means_, gmm1.covariances_,ct='full')
if itern == 0:
axes = plt.gca()
ylim = axes.get_ylim()
xlim = axes.get_xlim()
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
ax2 = plt.axes([.25, .55, .6, .4], facecolor='y')
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
plt.title('Image Space')
plt.xticks([])
plt.yticks([])
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
i, j = np.where(m_reshape == True)
# if binary:
# plt.imshow(y_reshape,cmap='brg')
# else:
plt.scatter(j,i,marker='x',c='k')
# import pdb; pdb.set_trace()
d = [i for i, x in enumerate(mask_one) if x]
ax0.scatter(X1[d,0],X1[d,1],marker='x',c='k')
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
itern += 1
X2 = X1.copy()
# Inpainting the zeros
r2 = np.reshape(X1, (20,length,X1.shape[1]), order="F")
X1 = np.reshape(inpaint.inpaint_biharmonic(
r2,m_reshape,multichannel=True),
(20*length,X1.shape[1]),order="F")
ax0.plot([X2[d,0],X1[d,0]],[X2[d,1],X1[d,1]],'r')
if ra:
arrowsU.append([X2[d,0],X1[d,0]])
arrowsV.append([X2[d,1],X1[d,1]])
if binary:
plt.imshow(y_reshape,cmap='brg')
else:
plt.imshow(np.reshape(X1[:,0], (20,length), order="F"))
if savegif:
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.clf()
# X_old = X.copy()
# np.save('bb',y_reshape)
# plt.figure()
# robust_mahal1.sort()
# plt.plot(robust_mahal1)
# plt.plot(250,robust_mahal1.max()*.87,'r*')
# plt.savefig('./images2/{}.png'.format(format(itern, "02")))
# plt.figure()
# robust_mahal2.sort()
# plt.plot(robust_mahal2)
# plt.plot(250,robust_mahal2.max()*.87,'r*')
# plt.savefig('./images3/{}.png'.format(format(itern, "02")))
itern += 1
fig = plt.figure(figsize=(10,10))
ax0 = fig.add_subplot(111)
colorz = ['b','r','g','m']
for jj,color in zip(range(a.min(),a.max()+1),colorz):
print(jj)
b = a == jj
b = [i for i, x in enumerate(b) if x]
if jj == 0:
c = b
ax0.scatter(X1[b,0],X1[b,1],c=colorz[(jj-a.min())])
self.plot_cov(gmm1.means_, gmm1.covariances_,ct)
ax0.scatter(X3[:,0],X3[:,1],c='k',alpha = 0.1)
ax0.set_title('New Method')
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
plt.text(.5*xlim[-1], ylim[0] + .005,'bad pts = {}'.format(format(len(c),"03")))
r = np.reshape(y_pred, (20,length), order="F")
if binary:
self.animate(y_reshape)
else:
self.animate(y_reshape, im=np.reshape(X1[:,0], (20,length), order="F"))
ax3 = plt.axes([.22, .15, .15, .1])
bics.append(bic)
plt.plot(bics)
plt.yticks([])
plt.xlabel('iteration')
plt.ylabel('BIC')
plt.savefig(im_dir + '{}.png'.format(format(itern, "02")))
plt.figure()
plt.imshow(r)
plt.xticks([])
plt.yticks([])
plt.figure()
r0 = np.reshape(y_0, (20,length), order="F")
plt.imshow(r - r0)
plt.xticks([])
plt.yticks([])
if savegif:
# save gif
files = os.listdir('./images')
images = []
for filename in files:
images.append(imageio.imread('./images/'+filename))
imageio.mimsave(title + '.mp4', images,fps=1)
imageio.mimsave(title + '.gif', images)
if ra:
return arrowsU,arrowsV
def find_paws(self,data, smooth_radius = 1, threshold = 0.0001):
# https://stackoverflow.com/questions/4087919/how-can-i-improve-my-paw-detection
"""Detects and isolates contiguous regions in the input array"""
# Blur the input data a bit so the paws have a continuous footprint
data = ndimage.uniform_filter(data, smooth_radius)
# Threshold the blurred data (this needs to be a bit > 0 due to the blur)
thresh = data > threshold
# Fill any interior holes in the paws to get cleaner regions...
filled = ndimage.morphology.binary_fill_holes(thresh)
# Label each contiguous paw
coded_paws, num_paws = ndimage.label(filled)
# Isolate the extent of each paw
# find_objects returns a list of 2-tuples: (slice(...), slice(...))
# which represents a rectangular box around the object
data_slices = ndimage.find_objects(coded_paws)
return data_slices
def animate(self,frame,im = None):
"""Detects paws and animates the position and raw data of each frame
in the input file"""
# With matplotlib, it's much, much faster to just update the properties
# of a display object than it is to create a new one, so we'll just update
# the data and position of the same objects throughout this animation...
# Since we're making an animation with matplotlib, we need
# ion() instead of show()...
fig = plt.gcf()
ax = plt.axes([.25, .55, .6, .4], facecolor='y')
plt.axis('off')
# Make an image based on the first frame that we'll update later
# (The first frame is never actually displayed)
if im is None:
plt.imshow(frame,cmap='brg')
else:
plt.imshow(im)
plt.title('Image Space')
# Make 4 rectangles that we can later move to the position of each paw
rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]
[ax.add_patch(rect) for rect in rects]
# Process and display each frame
paw_slices = self.find_paws(frame)
# Hide any rectangles that might be visible
[rect.set_visible(False) for rect in rects]
# Set the position and size of a rectangle for each paw and display it
for slice, rect in zip(paw_slices, rects):
dy, dx = slice
rect.set_xy((dx.start, dy.start))
rect.set_width(dx.stop - dx.start + 1)
rect.set_height(dy.stop - dy.start + 1)
rect.set_visible(True) |
a = -(y_pred - label_true.squeeze())
y_reshape = np.reshape(a, (20,length), order="F")
| random_line_split |
error_format.rs | pub mod data;
use crate::data::tokens::Span;
use crate::data::{position::Position, warnings::Warnings, Interval};
use nom::{
error::{ContextError, ErrorKind, ParseError},
*,
};
pub use crate::data::error_info::ErrorInfo;
pub use data::CustomError;
// TODO: add link to docs
// Parsing Errors
pub const ERROR_PARENTHESES: &str = "list elem type ( ... ) not found";
pub const ERROR_PARENTHESES_END: &str =
"Invalid argument. Expecting one ',' between each argument or ')' to end the list";
pub const ERROR_NUMBER_AS_IDENT: &str = "Int/Float can't be used as identifier";
pub const ERROR_FLOW_STEP: &str = "syntax error.";
pub const ERROR_RESERVED: &str = "reserved keyword can't be used as identifier";
pub const ERROR_PARSING: &str =
"Invalid argument. One of the action keywords [say, do, if, ...] is missing";
pub const ERROR_REMEMBER: &str =
"'remember' must be assigning to a variable via '='. Example: 'remember key = value'";
pub const ERROR_USE: &str =
"'use' must be assigning a variable with keyword 'as'. Example: 'use value as key'";
pub const ERROR_ACTION_ARGUMENT: &str =
"expecting valid argument after action keywords. Example: say value";
pub const ERROR_IMPORT_ARGUMENT: &str =
"'import' expecting valid function name. Example: 'import function from flow'";
pub const ERROR_INSERT_ARGUMENT: &str =
"'insert' expecting valid step name. Example: 'insert step from flow'";
pub const ERROR_BREAK: &str = "break can only be used inside loops";
pub const ERROR_RETURN: &str = "return expects a value to return";
pub const ERROR_LEFT_BRACE: &str = "expecting '{'";
pub const ERROR_RIGHT_BRACE: &str = "expecting '}'";
pub const ERROR_RIGHT_BRACKET: &str = "expecting ']'";
pub const ERROR_GOTO_STEP: &str = "missing step name after goto";
pub const ERROR_IMPORT_STEP: &str = "missing step name after import";
pub const ERROR_DOUBLE_QUOTE: &str = "expecting '\"' to end string";
pub const ERROR_DOUBLE_OPEN_BRACE: &str = "expecting '{{' to begin expandable string";
pub const ERROR_DOUBLE_CLOSE_BRACE: &str = "expecting '}}' to end expandable string";
pub const ERROR_UNREACHABLE: &str = "unreachable";
pub const ERROR_WRONG_ARGUMENT_EXPANDABLE_STRING: &str =
"wrong argument(s) given to expandable string";
pub const ERROR_FN_SCOPE: &str =
"invalid action. Use a valid action for this type of scope [do, if, return, ...]"; //\ndoc: https://docs.csml.dev/language/native-csml-functions
// Linter Errors
pub const ERROR_NO_FLOW: &str = "bot must have at least one flow";
// ## Interpreter Errors
// ### Validation
pub const ERROR_STEP_EXIST: &str = "step does not exist";
pub const ERROR_INVALID_FLOW: &str = "invalid flow: ";
pub const ERROR_START_INSTRUCTIONS: &str =
"to start an action one of the following instructions is expected: [say, do, if, foreach, goto]";
pub const ERROR_FOREACH: &str =
"foreach only accepts iterable elements like arrays and strings. Example: foreach(elem) in [1, 2, 3]";
pub const ERROR_FIND_BY_INDEX: &str =
"index must be of type int or string. Example var.[42] or var.[\"key\"]";
pub const ERROR_ASSIGN_IDENT: &str = "key must be of type identifier";
pub const ERROR_SIZE_IDENT: &str = "key can't be longer than 255 character";
pub const ERROR_NUMBER_AS_KEY: &str = "Int/Float can't be used as key";
pub const ERROR_KEY_ALPHANUMERIC: &str = "key must be alphanumeric";
pub const ERROR_FUNCTIONS_ARGS: &str = "function arguments must be in an array";
pub const ERROR_EXPR_TO_LITERAL: &str = "expression can't be converted to Literal";
pub const ERROR_PAYLOAD_EXCEED_MAX_SIZE: &str = "payload exceeds max payload size (16kb)";
pub const ERROR_STEP_LIMIT: &str =
    "[Infinite loop] Step limit reached: 100 steps were executed in a single run";
// Event
pub const ERROR_EVENT_CONTENT_TYPE: &str = "event can only be of ContentType::Event";
// Goto
pub const ERROR_GOTO_VAR: &str = "variables in goto need to resolve as strings";
// Component
pub const ERROR_COMPONENT_NAMESPACE: &str = "component must have a function applied";
pub const ERROR_COMPONENT_UNKNOWN: &str = "function does not exist for component";
// Fn API
pub const ERROR_FN_ID: &str = "App name must be of type string";
pub const ERROR_FN_ENDPOINT: &str = "App can not be called because apps_endpoint is not set in bot";
pub const ERROR_FAIL_RESPONSE_JSON: &str = "failed to read response as JSON";
// ### Import
pub const ERROR_IMPORT_FAIL: &str = "import failed at";
pub const ERROR_IMPORT_STEP_FLOW: &str = "step not found in flow";
// ### Variables
pub const ERROR_GET_VAR_INFO: &str = "Expression must be a variable";
pub const ERROR_JSON_TO_LITERAL: &str = "Number is larger than a 64-bit integer";
// ### Memory
pub const ERROR_STEP_MEMORY: &str = "Variable does not exist in step's memory";
pub const ERROR_FIND_MEMORY: &str = "is used before it was saved in memory";
// ### Functions
pub const ERROR_FN_ARGS: &str = "function arguments are not valid";
pub const ERROR_FN_COLON: &str =
"Expecting ':' at the end of function prototype. Example: 'fn name():' ";
// ### Built-in
pub const ERROR_TEXT: &str =
"Text component expects one argument of type string. Example: Text(\"hola\")";
pub const ERROR_TYPING: &str =
"Typing component expects one argument of type int or float. Example: Typing(3, ..)";
pub const ERROR_WAIT: &str =
"Wait component expects one argument of type int or float. Example: Wait(3)";
pub const ERROR_BUTTON: &str =
"Button component expects at least one argument of type string. Example: Button(\"hola\")";
pub const ERROR_CARD_BUTTON: &str = "argument 'buttons' in Card component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CARD_TITLE: &str = "argument title in Card component must be of type String";
pub const ERROR_QUESTION: &str = "argument 'buttons' in Question component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CAROUSEL: &str =
"argument 'cards' in Carousel component must be of type Array<Card>";
pub const ERROR_ONE_OF: &str =
"OneOf builtin expects one value of type Array. Example: OneOf( [1, 2, 3] )";
pub const ERROR_VAR_EXISTS: &str =
"Exists builtin expects one value of type String. Example: Exists( \"var_name\" )";
pub const ERROR_SHUFFLE: &str =
"Shuffle builtin expects one value of type Array. Example: Shuffle( [1, 2, 3] )";
pub const ERROR_LENGTH: &str =
"Length builtin expects one value of type Array or String. Example: Length( value )";
pub const ERROR_FIND: &str = "Find builtin expects 'in' param to be of type String. Example: Find(value, in = \"hola\", case_sensitive = true)";
pub const ERROR_FLOOR: &str =
"Floor builtin expects one argument of type float. Example: Floor(4.2)";
pub const ERROR_UUID: &str =
"UUID builtin expects one optional argument of type String. Example: UUID(\"v4\") or UUID(\"v1\")";
pub const ERROR_IMAGE: &str =
"Image component expects one argument of type string. Example: Image(\"hola\")";
pub const ERROR_URL: &str = "Url component expects one argument of type string and 2 optional string arguments: text, title. Example: Url(\"hola\", text = \"text\", title = \"title\")";
pub const ERROR_VIDEO: &str =
"Video component expects one argument of type string. Example: Video(url = \"hola\")";
pub const ERROR_AUDIO: &str =
"Audio component expects one argument of type string. Example: Audio(url = \"hola\")";
pub const ERROR_FILE: &str =
"File component expects one argument of type string. Example: File(url = \"hola\")";
pub const ERROR_HTTP_GET_VALUE: &str =
"not found in HTTP object. Use the HTTP() builtin to construct the correct object to make HTTP calls";
pub const ERROR_HTTP_QUERY_VALUES: &str =
"must have a value of type String. Example: {key: \"value\"}";
pub const ERROR_HTTP: &str =
"HTTP builtin expects one url of type string. Example: HTTP(\"https://clevy.io\")";
pub const ERROR_JWT: &str = "JWT builtin expects payload as argument. Example: JWT({
\"user\": \"name\",
\"somekey\": {
\"somevalue\": 42
},
\"exp\": 1618064023,
\"iss\": \"CSML STUDIO\"
})";
pub const ERROR_SMTP: &str =
"SMTP builtin expects SMTP Server Address. Example: SMTP(\"smtp.gmail.com\")";
pub const ERROR_CRYPTO: &str =
"CRYPTO builtin expects one argument of type string. Example: CRYPTO(\"text\")";
pub const ERROR_BUILTIN_UNKNOWN: &str = "Unknown builtin";
// ### native Components
pub const ERROR_HTTP_NOT_DATA: &str = "bad format: no 'data' in HTTP response";
pub const ERROR_NATIVE_COMPONENT: &str = "native component does not exist";
// ### Constants
pub const ERROR_CONSTANT_MUTABLE_FUNCTION: &str =
    "Invalid operation: constants cannot execute self-mutating functions";
pub const ERROR_INVALID_CONSTANT_EXPR: &str =
    "Invalid constant expression type: constants cannot be assigned this type of expression";
// ### Primitives
// #### Indexing
pub const ERROR_INDEXING: &str =
"indexing can only be done in ARRAY, OBJECT or STRING primitive types";
// #### Closure
pub const ERROR_CLOSURE_UNKNOWN_METHOD: &str = "Closures don't have methods";
// #### Boolean
pub const ERROR_BOOLEAN_UNKNOWN_METHOD: &str = "is not a method of Boolean";
// #### NUMBER
pub const ERROR_NUMBER_POW: &str =
    "[pow] takes one parameter of type int or float. Usage: number.pow(42)";
// #### Float
pub const ERROR_FLOAT_UNKNOWN_METHOD: &str = "is not a method of Float";
// #### Int
pub const ERROR_INT_UNKNOWN_METHOD: &str = "is not a method of Int";
// #### Null
pub const ERROR_NULL_UNKNOWN_METHOD: &str = "is not a method of Null";
// #### String
pub const ERROR_STRING_DO_MATCH: &str =
"[do_match] takes one parameter of type String. Usage: string.do_match(\"tag\")";
pub const ERROR_STRING_APPEND: &str =
"[append] takes one parameter of type String. Usage: string.append(\"text to append\")";
pub const ERROR_STRING_CONTAINS: &str =
"[contains] takes one parameter of type String. Usage: string.contains(\"word\")";
pub const ERROR_STRING_REPLACE: &str =
    "[replace] takes two parameters of type String. Usage: \"this is old\".replace(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_ALL: &str =
    "[replace_all] takes two parameters of type String. Usage: \"old old old old\".replace_all(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_REGEX: &str =
    "[replace_regex] takes two parameters of type String. Usage: \"hello world\".replace_regex(\"world\", \"Clevy\")";
pub const ERROR_STRING_CONTAINS_REGEX: &str =
"[contains_regex] takes one parameter of type String. Usage: string.contains_regex(\"regex\")";
pub const ERROR_STRING_VALID_REGEX: &str = "parameter must be a valid regex expression"; // link to docs
pub const ERROR_STRING_START_WITH: &str =
"[starts_with] takes one parameter of type String. Usage: string.starts_with(\"tag\")";
pub const ERROR_STRING_START_WITH_REGEX: &str = "[starts_with_regex] takes one parameter of type String. Usage: string.start_with_regex(\"regex\")";
pub const ERROR_STRING_END_WITH: &str =
"[ends_with] takes one parameter of type String. Usage: string.ends_with(\"tag\")";
pub const ERROR_STRING_END_WITH_REGEX: &str =
"[ends_with_regex] takes one parameter of type String. Usage: string.ends_with_regex(\"regex\")";
pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed";
pub const ERROR_STRING_SPLIT: &str =
"[split] takes one parameter of type String. Usage: string.split(\"separator\")";
pub const ERROR_STRING_MATCH_REGEX: &str =
"[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")";
pub const ERROR_STRING_POW: &str =
"[pow] takes one parameter of type Float or Int. Usage: string.pow(number)";
pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true' ";
pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true' to check it";
pub const ERROR_STRING_RHS: &str = "rhs must be of type string";
pub const ERROR_SLICE_ARG_INT: &str =
".slice(start, optional<end>) args need to be of type Integer";
pub const ERROR_SLICE_ARG_LEN: &str =
    ".slice(start, optional<end>) args need to be less than the string length";
pub const ERROR_SLICE_ARG2: &str =
    ".slice(start, optional<end>) end needs to be greater than start. Example: .slice(2, 5)";
pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String";
// #### Array
pub const ERROR_ARRAY_TYPE: &str = "value must be of type array";
pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist";
pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int";
pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Usage: array[1]";
pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()";
pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is ";
pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty";
pub const ERROR_ARRAY_INSERT_AT: &str =
"[insert_at] takes two arguments. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_INSERT_AT_INT: &str =
"[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_REMOVE_AT: &str =
"[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) ";
pub const ERROR_ARRAY_JOIN: &str =
"[join] takes one parameter of type String. Usage: array.join(\"elem\") ";
pub const ERROR_ARRAY_INDEX_OF: &str =
"[index_of] takes one parameter. Usage: array.index_of(elem)";
pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. Usage: array.find(elem)";
pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array";
// #### CRYPTO OBJECT
// ## HMAC and HASH OBJECT
pub const ERROR_HASH: &str = "Crypto(string) command expects an argument of type String";
pub const ERROR_HASH_ALGO: &str =
    "Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512";
pub const ERROR_HMAC_KEY: &str = "HMAC key needs to be of type string";
pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is used incorrectly";
pub const ERROR_DIGEST_ALGO: &str =
"Invalid Digest Algorithm, supported Algorithms are hex, base64";
// #### JWT OBJECT
pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512";
pub const ERROR_JWT_SECRET: &str = "secret must be of type String";
pub const ERROR_JWT_SIGN_CLAIMS: &str =
    "JWT(claims) command expects argument 'claims' of type Object";
pub const ERROR_JWT_SIGN_ALGO: &str =
    "JWT(claims).sign(algo, secret, Optional<Header>) expects first argument 'algo' of type String";
pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expects second argument 'secret' of type String";
pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expects argument 'jwt' of type String";
pub const ERROR_JWT_DECODE_ALGO: &str =
    "JWT(jwt).decode(algo, secret) expects first argument 'algo' of type String";
pub const ERROR_JWT_DECODE_SECRET: &str =
    "JWT(jwt).decode(algo, secret) expects second argument 'secret' of type String";
pub const ERROR_JWT_VALIDATION_CLAIMS: &str =
    "JWT(jwt).verify(claims, algo, secret) expects first argument 'claims' of type Object";
pub const ERROR_JWT_VALIDATION_ALGO: &str =
    "JWT(jwt).verify(claims, algo, secret) expects second argument 'algo' of type String";
pub const ERROR_JWT_VALIDATION_SECRETE: &str =
    "JWT(jwt).verify(claims, algo, secret) expects third argument 'secret' of type String";
// #### HTTP OBJECT
pub const ERROR_HTTP_SET: &str =
"[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )";
pub const ERROR_HTTP_QUERY: &str =
"[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )";
pub const ERROR_HTTP_SEND: &str = "[send] HTTP Object is badly formatted; read the docs for correct usage";
pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP";
// #### OBJECT
pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object";
pub const ERROR_OBJECT_GET: &str = "key does not exist";
pub const ERROR_OBJECT_CONTAINS: &str =
"[contains] takes one argument of type String. Usage: object.contains(\"key\")";
pub const ERROR_OBJECT_GET_GENERICS: &str =
"[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")";
pub const ERROR_OBJECT_INSERT: &str =
    "[insert] takes two arguments. Usage: object.insert(string, any_type)";
pub const ERROR_OBJECT_ASSIGN: &str =
    "[assign] takes one argument. Usage: object.assign({\"key\": \"value\"})";
pub const ERROR_OBJECT_REMOVE: &str =
"[remove] takes one argument of type String. Usage: object.remove(\"key\")";
pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String";
pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object";
// #### METHODS
pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named";
pub const ERROR_OPS: &str = "[!] Ops: Illegal operation";
pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero";
pub const ERROR_OPS_DIV_FLOAT: &str = "[!] Float: Division by zero";
pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:";
pub const OVERFLOWING_OPERATION: &str = "overflowing operation:";
////////////////////////////////////////////////////////////////////////////////
// PRIVATE FUNCTION
////////////////////////////////////////////////////////////////////////////////
fn add_context_to_error_message<'a>(
flow_slice: Span<'a>,
message: String,
line_number: u32,
column: usize,
offset: usize,
) -> String {
use std::fmt::Write;
let mut result = String::new();
let prefix = &flow_slice.fragment().as_bytes()[..offset];
// Find the line that includes the subslice:
// Find the *last* newline before the substring starts
let line_begin = prefix
.iter()
.rev()
.position(|&b| b == b'\n')
.map(|pos| offset - pos)
.unwrap_or(0);
// Find the full line after that newline
let line = flow_slice.fragment()[line_begin..]
.lines()
.next()
.unwrap_or(&flow_slice.fragment()[line_begin..])
.trim_end();
write!(
&mut result,
"at line {line_number},\n\
{line}\n\
{caret:>column$}\n\
{context}\n\n",
line_number = line_number,
context = message,
line = line,
caret = '^',
column = column,
)
// Because `write!` to a `String` is infallible, this `unwrap` is fine.
.unwrap();
result
}
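// Illustrative example (added note, not part of the original source): for a
// flow whose third line reads `do 42 +` and an error reported at column 8,
// the formatter above would render roughly:
//
//   at line 3,
//   do 42 +
//          ^
//   <the error message passed in `message`>
//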
////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTION
////////////////////////////////////////////////////////////////////////////////
pub fn gen_error_info(position: Position, message: String) -> ErrorInfo {
ErrorInfo::new(position, message)
}
pub fn gen_warning_info(position: Position, message: String) -> Warnings {
Warnings { position, message }
}
pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
|
pub fn gen_nom_failure<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Failure(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn convert_error_from_span<'a>(flow_slice: Span<'a>, e: CustomError<Span<'a>>) -> String {
let message = e.error.to_owned();
let offset = e.input.location_offset();
// Count the number of newlines in the first `offset` bytes of input
let line_number = e.input.location_line();
// The (1-indexed) column number is the offset of our substring into that line
let column = e.input.get_column();
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn convert_error_from_interval<'a>(
flow_slice: Span<'a>,
message: String,
interval: Interval,
) -> String {
let offset = interval.offset;
// Count the number of newlines in the first `offset` bytes of input
let line_number = interval.start_line;
// The (1-indexed) column number is the offset of our substring into that line
let column = interval.start_column as usize;
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn gen_infinite_loop_error_msg(infinite_loop: Vec<(String, String)>) -> String {
infinite_loop
.iter()
.fold(String::new(), |mut acc, (flow, step)| {
acc.push_str(&format!("[flow] {}, [step] {}\n", flow, step));
acc
})
}
| {
Err::Error(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
} | identifier_body |
error_format.rs | pub mod data;
use crate::data::tokens::Span;
use crate::data::{position::Position, warnings::Warnings, Interval};
use nom::{
error::{ContextError, ErrorKind, ParseError},
*,
};
pub use crate::data::error_info::ErrorInfo;
pub use data::CustomError;
// TODO: add link to docs
// Parsing Errors
pub const ERROR_PARENTHESES: &str = "list elem type ( ... ) not found";
pub const ERROR_PARENTHESES_END: &str =
"Invalid argument. Expecting one ',' between each argument or ')' to end the list";
pub const ERROR_NUMBER_AS_IDENT: &str = "Int/Float can't be used as identifier";
pub const ERROR_FLOW_STEP: &str = "syntax error.";
pub const ERROR_RESERVED: &str = "reserved keyword can't be used as identifier";
pub const ERROR_PARSING: &str =
"Invalid argument. One of the action keywords [say, do, if, ...] is missing";
pub const ERROR_REMEMBER: &str =
"'remember' must be assigning to a variable via '='. Example: 'remember key = value'";
pub const ERROR_USE: &str =
"'use' must be assigning a variable with keyword 'as'. Example: 'use value as key'";
pub const ERROR_ACTION_ARGUMENT: &str =
"expecting valid argument after action keywords. Example: say value";
pub const ERROR_IMPORT_ARGUMENT: &str =
"'import' expecting valid function name. Example: 'import function from flow'";
pub const ERROR_INSERT_ARGUMENT: &str =
"'insert' expecting valid step name. Example: 'insert step from flow'";
pub const ERROR_BREAK: &str = "break can only be used inside loops";
pub const ERROR_RETURN: &str = "return expects a value to return";
pub const ERROR_LEFT_BRACE: &str = "expecting '{'";
pub const ERROR_RIGHT_BRACE: &str = "expecting '}'";
pub const ERROR_RIGHT_BRACKET: &str = "expecting ']'";
pub const ERROR_GOTO_STEP: &str = "missing step name after goto";
pub const ERROR_IMPORT_STEP: &str = "missing step name after import";
pub const ERROR_DOUBLE_QUOTE: &str = "expecting '\"' to end string";
pub const ERROR_DOUBLE_OPEN_BRACE: &str = "expecting '{{' to begin expandable string";
pub const ERROR_DOUBLE_CLOSE_BRACE: &str = "expecting '}}' to end expandable string";
pub const ERROR_UNREACHABLE: &str = "unreachable";
pub const ERROR_WRONG_ARGUMENT_EXPANDABLE_STRING: &str =
"wrong argument(s) given to expandable string";
pub const ERROR_FN_SCOPE: &str =
"invalid action. Use a valid action for this type of scope [do, if, return, ...]"; //\ndoc: https://docs.csml.dev/language/native-csml-functions
// Linter Errors
pub const ERROR_NO_FLOW: &str = "bot must have at least one flow";
// ## Interpreter Errors
// ### Validation
pub const ERROR_STEP_EXIST: &str = "step does not exist";
pub const ERROR_INVALID_FLOW: &str = "invalid flow: ";
pub const ERROR_START_INSTRUCTIONS: &str =
"to start an action one of the following instructions is expected: [say, do, if, foreach, goto]";
pub const ERROR_FOREACH: &str =
"foreach only accepts iterable elements like arrays and strings. Example: foreach(elem) in [1, 2, 3]";
pub const ERROR_FIND_BY_INDEX: &str =
"index must be of type int or string. Example var.[42] or var.[\"key\"]";
pub const ERROR_ASSIGN_IDENT: &str = "key must be of type identifier";
pub const ERROR_SIZE_IDENT: &str = "key can't be longer than 255 characters";
pub const ERROR_NUMBER_AS_KEY: &str = "Int/Float can't be used as key";
pub const ERROR_KEY_ALPHANUMERIC: &str = "key must be alphanumeric";
pub const ERROR_FUNCTIONS_ARGS: &str = "function arguments must be in an array";
pub const ERROR_EXPR_TO_LITERAL: &str = "expression can't be converted to Literal";
pub const ERROR_PAYLOAD_EXCEED_MAX_SIZE: &str = "payload exceeds max payload size (16kb)";
pub const ERROR_STEP_LIMIT: &str =
    "[Infinite loop] Step limit reached: 100 steps were executed in a single run";
// Event
pub const ERROR_EVENT_CONTENT_TYPE: &str = "event can only be of ContentType::Event";
// Goto
pub const ERROR_GOTO_VAR: &str = "variables in goto need to resolve as strings";
// Component
pub const ERROR_COMPONENT_NAMESPACE: &str = "component must have a function applied";
pub const ERROR_COMPONENT_UNKNOWN: &str = "function does not exist for component";
// Fn API
pub const ERROR_FN_ID: &str = "App name must be of type string";
pub const ERROR_FN_ENDPOINT: &str = "App can not be called because apps_endpoint is not set in bot";
pub const ERROR_FAIL_RESPONSE_JSON: &str = "failed to read response as JSON";
// ### Import
pub const ERROR_IMPORT_FAIL: &str = "import failed at";
pub const ERROR_IMPORT_STEP_FLOW: &str = "step not found in flow";
// ### Variables
pub const ERROR_GET_VAR_INFO: &str = "Expression must be a variable";
pub const ERROR_JSON_TO_LITERAL: &str = "Number is larger than a 64-bit integer";
// ### Memory
pub const ERROR_STEP_MEMORY: &str = "Variable does not exist in step's memory";
pub const ERROR_FIND_MEMORY: &str = "is used before it was saved in memory";
// ### Functions
pub const ERROR_FN_ARGS: &str = "function arguments are not valid";
pub const ERROR_FN_COLON: &str =
"Expecting ':' at the end of function prototype. Example: 'fn name():' ";
// ### Built-in
pub const ERROR_TEXT: &str =
"Text component expects one argument of type string. Example: Text(\"hola\")";
pub const ERROR_TYPING: &str =
"Typing component expects one argument of type int or float. Example: Typing(3, ..)";
pub const ERROR_WAIT: &str =
"Wait component expects one argument of type int or float. Example: Wait(3)";
pub const ERROR_BUTTON: &str =
"Button component expects at least one argument of type string. Example: Button(\"hola\")";
pub const ERROR_CARD_BUTTON: &str = "argument 'buttons' in Card component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CARD_TITLE: &str = "argument title in Card component must be of type String";
pub const ERROR_QUESTION: &str = "argument 'buttons' in Question component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CAROUSEL: &str =
"argument 'cards' in Carousel component must be of type Array<Card>";
pub const ERROR_ONE_OF: &str =
"OneOf builtin expects one value of type Array. Example: OneOf( [1, 2, 3] )";
pub const ERROR_VAR_EXISTS: &str =
"Exists builtin expects one value of type String. Example: Exists( \"var_name\" )";
pub const ERROR_SHUFFLE: &str =
"Shuffle builtin expects one value of type Array. Example: Shuffle( [1, 2, 3] )";
pub const ERROR_LENGTH: &str =
"Length builtin expects one value of type Array or String. Example: Length( value )";
pub const ERROR_FIND: &str = "Find builtin expects 'in' param to be of type String. Example: Find(value, in = \"hola\", case_sensitive = true)";
pub const ERROR_FLOOR: &str =
"Floor builtin expects one argument of type float. Example: Floor(4.2)";
pub const ERROR_UUID: &str =
"UUID builtin expects one optional argument of type String. Example: UUID(\"v4\") or UUID(\"v1\")";
pub const ERROR_IMAGE: &str =
"Image component expects one argument of type string. Example: Image(\"hola\")";
pub const ERROR_URL: &str = "Url component expects one argument of type string and 2 optional string arguments: text, title. Example: Url(\"hola\", text = \"text\", title = \"title\")";
pub const ERROR_VIDEO: &str =
"Video component expects one argument of type string. Example: Video(url = \"hola\")";
pub const ERROR_AUDIO: &str =
"Audio component expects one argument of type string. Example: Audio(url = \"hola\")";
pub const ERROR_FILE: &str =
"File component expects one argument of type string. Example: File(url = \"hola\")";
pub const ERROR_HTTP_GET_VALUE: &str =
"not found in HTTP object. Use the HTTP() builtin to construct the correct object to make HTTP calls";
pub const ERROR_HTTP_QUERY_VALUES: &str =
"must have a value of type String. Example: {key: \"value\"}";
pub const ERROR_HTTP: &str =
"HTTP builtin expects one url of type string. Example: HTTP(\"https://clevy.io\")";
pub const ERROR_JWT: &str = "JWT builtin expects payload as argument. Example: JWT({
\"user\": \"name\",
\"somekey\": {
\"somevalue\": 42
},
\"exp\": 1618064023,
\"iss\": \"CSML STUDIO\"
})";
pub const ERROR_SMTP: &str =
"SMTP builtin expects SMTP Server Address. Example: SMTP(\"smtp.gmail.com\")";
pub const ERROR_CRYPTO: &str =
"CRYPTO builtin expects one argument of type string. Example: CRYPTO(\"text\")";
pub const ERROR_BUILTIN_UNKNOWN: &str = "Unknown builtin";
// ### native Components
pub const ERROR_HTTP_NOT_DATA: &str = "bad format: no 'data' in HTTP response";
pub const ERROR_NATIVE_COMPONENT: &str = "native component does not exist";
// ### Constants
pub const ERROR_CONSTANT_MUTABLE_FUNCTION: &str =
    "Invalid operation: constants cannot execute self-mutating functions";
pub const ERROR_INVALID_CONSTANT_EXPR: &str =
    "Invalid constant expression type: constants cannot be assigned this type of expression";
// ### Primitives
// #### Indexing
pub const ERROR_INDEXING: &str =
"indexing can only be done in ARRAY, OBJECT or STRING primitive types";
// #### Closure
pub const ERROR_CLOSURE_UNKNOWN_METHOD: &str = "Closures don't have methods";
// #### Boolean
pub const ERROR_BOOLEAN_UNKNOWN_METHOD: &str = "is not a method of Boolean";
// #### NUMBER
pub const ERROR_NUMBER_POW: &str =
    "[pow] takes one parameter of type int or float. Usage: number.pow(42)";
// #### Float
pub const ERROR_FLOAT_UNKNOWN_METHOD: &str = "is not a method of Float";
// #### Int
pub const ERROR_INT_UNKNOWN_METHOD: &str = "is not a method of Int";
// #### Null
pub const ERROR_NULL_UNKNOWN_METHOD: &str = "is not a method of Null";
// #### String
pub const ERROR_STRING_DO_MATCH: &str =
"[do_match] takes one parameter of type String. Usage: string.do_match(\"tag\")";
pub const ERROR_STRING_APPEND: &str =
"[append] takes one parameter of type String. Usage: string.append(\"text to append\")";
pub const ERROR_STRING_CONTAINS: &str =
"[contains] takes one parameter of type String. Usage: string.contains(\"word\")";
pub const ERROR_STRING_REPLACE: &str =
    "[replace] takes two parameters of type String. Usage: \"this is old\".replace(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_ALL: &str =
    "[replace_all] takes two parameters of type String. Usage: \"old old old old\".replace_all(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_REGEX: &str =
    "[replace_regex] takes two parameters of type String. Usage: \"hello world\".replace_regex(\"world\", \"Clevy\")";
pub const ERROR_STRING_CONTAINS_REGEX: &str =
"[contains_regex] takes one parameter of type String. Usage: string.contains_regex(\"regex\")";
pub const ERROR_STRING_VALID_REGEX: &str = "parameter must be a valid regex expression"; // link to docs
pub const ERROR_STRING_START_WITH: &str =
"[starts_with] takes one parameter of type String. Usage: string.starts_with(\"tag\")";
pub const ERROR_STRING_START_WITH_REGEX: &str = "[starts_with_regex] takes one parameter of type String. Usage: string.start_with_regex(\"regex\")";
pub const ERROR_STRING_END_WITH: &str =
"[ends_with] takes one parameter of type String. Usage: string.ends_with(\"tag\")";
pub const ERROR_STRING_END_WITH_REGEX: &str =
"[ends_with_regex] takes one parameter of type String. Usage: string.ends_with_regex(\"regex\")";
pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed";
pub const ERROR_STRING_SPLIT: &str =
"[split] takes one parameter of type String. Usage: string.split(\"separator\")";
pub const ERROR_STRING_MATCH_REGEX: &str =
"[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")";
pub const ERROR_STRING_POW: &str =
"[pow] takes one parameter of type Float or Int. Usage: string.pow(number)";
pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true' ";
pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true' to check it";
pub const ERROR_STRING_RHS: &str = "rhs must be of type string";
pub const ERROR_SLICE_ARG_INT: &str =
".slice(start, optional<end>) args need to be of type Integer";
pub const ERROR_SLICE_ARG_LEN: &str =
    ".slice(start, optional<end>) args need to be less than the string length";
pub const ERROR_SLICE_ARG2: &str =
    ".slice(start, optional<end>) end needs to be greater than start. Example: .slice(2, 5)";
pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String";
// #### Array
pub const ERROR_ARRAY_TYPE: &str = "value must be of type array";
pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist";
pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int";
pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Usage: array[1]";
pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()";
pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is ";
pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty";
pub const ERROR_ARRAY_INSERT_AT: &str =
"[insert_at] takes two arguments. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_INSERT_AT_INT: &str =
"[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_REMOVE_AT: &str =
"[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) ";
pub const ERROR_ARRAY_JOIN: &str =
"[join] takes one parameter of type String. Usage: array.join(\"elem\") ";
pub const ERROR_ARRAY_INDEX_OF: &str =
"[index_of] takes one parameter. Usage: array.index_of(elem)";
pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. Usage: array.find(elem)";
pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array";
// #### CRYPTO OBJECT
// ## HMAC and HASH OBJECT
pub const ERROR_HASH: &str = "Crypto(string) command expects an argument of type String";
pub const ERROR_HASH_ALGO: &str =
    "Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512";
pub const ERROR_HMAC_KEY: &str = "HMAC key needs to be of type string";
pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is used incorrectly";
pub const ERROR_DIGEST_ALGO: &str =
"Invalid Digest Algorithm, supported Algorithms are hex, base64";
// #### JWT OBJECT
pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512";
pub const ERROR_JWT_SECRET: &str = "secret must be of type String";
pub const ERROR_JWT_SIGN_CLAIMS: &str =
    "JWT(claims) command expects argument 'claims' of type Object";
pub const ERROR_JWT_SIGN_ALGO: &str =
    "JWT(claims).sign(algo, secret, Optional<Header>) expects first argument 'algo' of type String";
pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expects second argument 'secret' of type String";
pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expects argument 'jwt' of type String";
pub const ERROR_JWT_DECODE_ALGO: &str =
    "JWT(jwt).decode(algo, secret) expects first argument 'algo' of type String";
pub const ERROR_JWT_DECODE_SECRET: &str = | "JWT(jwt).decode(algo, secret) expect second argument 'claims' of type String";
pub const ERROR_JWT_VALIDATION_CLAIMS: &str =
    "JWT(jwt).verify(claims, algo, secret) expects first argument 'claims' of type Object";
pub const ERROR_JWT_VALIDATION_ALGO: &str =
    "JWT(jwt).verify(claims, algo, secret) expects second argument 'algo' of type String";
pub const ERROR_JWT_VALIDATION_SECRETE: &str =
    "JWT(jwt).verify(claims, algo, secret) expects third argument 'secret' of type String";
// #### HTTP OBJECT
pub const ERROR_HTTP_SET: &str =
"[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )";
pub const ERROR_HTTP_QUERY: &str =
"[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )";
pub const ERROR_HTTP_SEND: &str = "[send] HTTP Object is badly formatted; read the docs for correct usage";
pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP";
// #### OBJECT
pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object";
pub const ERROR_OBJECT_GET: &str = "key does not exist";
pub const ERROR_OBJECT_CONTAINS: &str =
"[contains] takes one argument of type String. Usage: object.contains(\"key\")";
pub const ERROR_OBJECT_GET_GENERICS: &str =
"[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")";
pub const ERROR_OBJECT_INSERT: &str =
    "[insert] takes two arguments. Usage: object.insert(string, any_type)";
pub const ERROR_OBJECT_ASSIGN: &str =
    "[assign] takes one argument. Usage: object.assign({\"key\": \"value\"})";
pub const ERROR_OBJECT_REMOVE: &str =
"[remove] takes one argument of type String. Usage: object.remove(\"key\")";
pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String";
pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object";
// #### METHODS
pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named";
pub const ERROR_OPS: &str = "[!] Ops: Illegal operation";
pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero";
pub const ERROR_OPS_DIV_FLOAT: &str = "[!] Float: Division by zero";
pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:";
pub const OVERFLOWING_OPERATION: &str = "overflowing operation:";
////////////////////////////////////////////////////////////////////////////////
// PRIVATE FUNCTION
////////////////////////////////////////////////////////////////////////////////
fn add_context_to_error_message<'a>(
flow_slice: Span<'a>,
message: String,
line_number: u32,
column: usize,
offset: usize,
) -> String {
use std::fmt::Write;
let mut result = String::new();
let prefix = &flow_slice.fragment().as_bytes()[..offset];
// Find the line that includes the subslice:
// Find the *last* newline before the substring starts
let line_begin = prefix
.iter()
.rev()
.position(|&b| b == b'\n')
.map(|pos| offset - pos)
.unwrap_or(0);
// Find the full line after that newline
let line = flow_slice.fragment()[line_begin..]
.lines()
.next()
.unwrap_or(&flow_slice.fragment()[line_begin..])
.trim_end();
write!(
&mut result,
"at line {line_number},\n\
{line}\n\
{caret:>column$}\n\
{context}\n\n",
line_number = line_number,
context = message,
line = line,
caret = '^',
column = column,
)
// Because `write!` to a `String` is infallible, this `unwrap` is fine.
.unwrap();
result
}
////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTION
////////////////////////////////////////////////////////////////////////////////
pub fn gen_error_info(position: Position, message: String) -> ErrorInfo {
ErrorInfo::new(position, message)
}
pub fn gen_warning_info(position: Position, message: String) -> Warnings {
Warnings { position, message }
}
pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Error(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn gen_nom_failure<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Failure(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn convert_error_from_span<'a>(flow_slice: Span<'a>, e: CustomError<Span<'a>>) -> String {
let message = e.error.to_owned();
let offset = e.input.location_offset();
// Count the number of newlines in the first `offset` bytes of input
let line_number = e.input.location_line();
// The (1-indexed) column number is the offset of our substring into that line
let column = e.input.get_column();
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn convert_error_from_interval<'a>(
flow_slice: Span<'a>,
message: String,
interval: Interval,
) -> String {
let offset = interval.offset;
// Count the number of newlines in the first `offset` bytes of input
let line_number = interval.start_line;
// The (1-indexed) column number is the offset of our substring into that line
let column = interval.start_column as usize;
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn gen_infinite_loop_error_msg(infinite_loop: Vec<(String, String)>) -> String {
infinite_loop
.iter()
.fold(String::new(), |mut acc, (flow, step)| {
acc.push_str(&format!("[flow] {}, [step] {}\n", flow, step));
acc
})
} | random_line_split |
|
error_format.rs | pub mod data;
use crate::data::tokens::Span;
use crate::data::{position::Position, warnings::Warnings, Interval};
use nom::{
error::{ContextError, ErrorKind, ParseError},
*,
};
pub use crate::data::error_info::ErrorInfo;
pub use data::CustomError;
// TODO: add link to docs
// Parsing Errors
pub const ERROR_PARENTHESES: &str = "list elem type ( ... ) not found";
pub const ERROR_PARENTHESES_END: &str =
"Invalid argument. Expecting one ',' between each argument or ')' to end the list";
pub const ERROR_NUMBER_AS_IDENT: &str = "Int/Float can't be used as identifier";
pub const ERROR_FLOW_STEP: &str = "syntax error.";
pub const ERROR_RESERVED: &str = "reserved keyword can't be used as identifier";
pub const ERROR_PARSING: &str =
"Invalid argument. One of the action keywords [say, do, if, ...] is missing";
pub const ERROR_REMEMBER: &str =
"'remember' must be assigning to a variable via '='. Example: 'remember key = value'";
pub const ERROR_USE: &str =
"'use' must be assigning a variable with keyword 'as'. Example: 'use value as key'";
pub const ERROR_ACTION_ARGUMENT: &str =
"expecting valid argument after action keywords. Example: say value";
pub const ERROR_IMPORT_ARGUMENT: &str =
"'import' expecting valid function name. Example: 'import function from flow'";
pub const ERROR_INSERT_ARGUMENT: &str =
"'insert' expecting valid step name. Example: 'insert step from flow'";
pub const ERROR_BREAK: &str = "break can only be used inside loops";
pub const ERROR_RETURN: &str = "return expects a value to return";
pub const ERROR_LEFT_BRACE: &str = "expecting '{'";
pub const ERROR_RIGHT_BRACE: &str = "expecting '}'";
pub const ERROR_RIGHT_BRACKET: &str = "expecting ']'";
pub const ERROR_GOTO_STEP: &str = "missing step name after goto";
pub const ERROR_IMPORT_STEP: &str = "missing step name after import";
pub const ERROR_DOUBLE_QUOTE: &str = "expecting '\"' to end string";
pub const ERROR_DOUBLE_OPEN_BRACE: &str = "expecting '{{' to begin expandable string";
pub const ERROR_DOUBLE_CLOSE_BRACE: &str = "expecting '}}' to end expandable string";
pub const ERROR_UNREACHABLE: &str = "unreachable";
pub const ERROR_WRONG_ARGUMENT_EXPANDABLE_STRING: &str =
"wrong argument(s) given to expandable string";
pub const ERROR_FN_SCOPE: &str =
"invalid action. Use a valid action for this type of scope [do, if, return, ...]"; //\ndoc: https://docs.csml.dev/language/native-csml-functions
// Linter Errors
pub const ERROR_NO_FLOW: &str = "bot must have at least one flow";
// ## Interpreter Errors
// ### Validation
pub const ERROR_STEP_EXIST: &str = "step does not exist";
pub const ERROR_INVALID_FLOW: &str = "invalid flow: ";
pub const ERROR_START_INSTRUCTIONS: &str =
"to start an action one of the following instructions is expected: [say, do, if, foreach, goto]";
pub const ERROR_FOREACH: &str =
"foreach only accepts iterable elements like arrays and strings. Example: foreach(elem) in [1, 2, 3]";
pub const ERROR_FIND_BY_INDEX: &str =
"index must be of type int or string. Example var.[42] or var.[\"key\"]";
pub const ERROR_ASSIGN_IDENT: &str = "key must be of type identifier";
pub const ERROR_SIZE_IDENT: &str = "key can't be longer than 255 characters";
pub const ERROR_NUMBER_AS_KEY: &str = "Int/Float can't be used as key";
pub const ERROR_KEY_ALPHANUMERIC: &str = "key must be alphanumeric";
pub const ERROR_FUNCTIONS_ARGS: &str = "function arguments must be in an array";
pub const ERROR_EXPR_TO_LITERAL: &str = "expression can't be converted to Literal";
pub const ERROR_PAYLOAD_EXCEED_MAX_SIZE: &str = "payload exceeds max payload size (16kb)";
pub const ERROR_STEP_LIMIT: &str =
    "[Infinite loop] Step limit reached: 100 steps were executed in a single run";
// Event
pub const ERROR_EVENT_CONTENT_TYPE: &str = "event can only be of ContentType::Event";
// Goto
pub const ERROR_GOTO_VAR: &str = "variables in goto need to resolve as strings";
// Component
pub const ERROR_COMPONENT_NAMESPACE: &str = "component must have a function applied";
pub const ERROR_COMPONENT_UNKNOWN: &str = "function does not exist for component";
// Fn API
pub const ERROR_FN_ID: &str = "App name must be of type string";
pub const ERROR_FN_ENDPOINT: &str = "App can not be called because apps_endpoint is not set in bot";
pub const ERROR_FAIL_RESPONSE_JSON: &str = "failed to read response as JSON";
// ### Import
pub const ERROR_IMPORT_FAIL: &str = "import failed at";
pub const ERROR_IMPORT_STEP_FLOW: &str = "step not found in flow";
// ### Variables
pub const ERROR_GET_VAR_INFO: &str = "Expression must be a variable";
pub const ERROR_JSON_TO_LITERAL: &str = "Number is larger than a 64-bit integer";
// ### Memory
pub const ERROR_STEP_MEMORY: &str = "Variable does not exist in step's memory";
pub const ERROR_FIND_MEMORY: &str = "is used before it was saved in memory";
// ### Functions
pub const ERROR_FN_ARGS: &str = "function arguments are not valid";
pub const ERROR_FN_COLON: &str =
"Expecting ':' at the end of function prototype. Example: 'fn name():' ";
// ### Built-in
pub const ERROR_TEXT: &str =
"Text component expects one argument of type string. Example: Text(\"hola\")";
pub const ERROR_TYPING: &str =
"Typing component expects one argument of type int or float. Example: Typing(3, ..)";
pub const ERROR_WAIT: &str =
"Wait component expects one argument of type int or float. Example: Wait(3)";
pub const ERROR_BUTTON: &str =
"Button component expects at least one argument of type string. Example: Button(\"hola\")";
pub const ERROR_CARD_BUTTON: &str = "argument 'buttons' in Card component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CARD_TITLE: &str = "argument title in Card component must be of type String";
pub const ERROR_QUESTION: &str = "argument 'buttons' in Question component must be of type Array<Button>. Example: [ Button(\"b1\"), Button(\"b2\") ]";
pub const ERROR_CAROUSEL: &str =
"argument 'cards' in Carousel component must be of type Array<Card>";
pub const ERROR_ONE_OF: &str =
"OneOf builtin expects one value of type Array. Example: OneOf( [1, 2, 3] )";
pub const ERROR_VAR_EXISTS: &str =
"Exists builtin expects one value of type String. Example: Exists( \"var_name\" )";
pub const ERROR_SHUFFLE: &str =
"Shuffle builtin expects one value of type Array. Example: Shuffle( [1, 2, 3] )";
pub const ERROR_LENGTH: &str =
"Length builtin expects one value of type Array or String. Example: Length( value )";
pub const ERROR_FIND: &str = "Find builtin expects 'in' param to be of type String. Example: Find(value, in = \"hola\", case_sensitive = true)";
pub const ERROR_FLOOR: &str =
"Floor builtin expects one argument of type float. Example: Floor(4.2)";
pub const ERROR_UUID: &str =
"UUID builtin expects one optional argument of type String. Example: UUID(\"v4\") or UUID(\"v1\")";
pub const ERROR_IMAGE: &str =
"Image component expects one argument of type string. Example: Image(\"hola\")";
pub const ERROR_URL: &str = "Url component expects one argument of type string and 2 optional string arguments: text, title. Example: Url(\"hola\", text = \"text\", title = \"title\")";
pub const ERROR_VIDEO: &str =
"Video component expects one argument of type string. Example: Video(url = \"hola\")";
pub const ERROR_AUDIO: &str =
"Audio component expects one argument of type string. Example: Audio(url = \"hola\")";
pub const ERROR_FILE: &str =
"File component expects one argument of type string. Example: File(url = \"hola\")";
pub const ERROR_HTTP_GET_VALUE: &str =
"not found in HTTP object. Use the HTTP() builtin to construct the correct object to make HTTP calls";
pub const ERROR_HTTP_QUERY_VALUES: &str =
"must have a value of type String. Example: {key: \"value\"}";
pub const ERROR_HTTP: &str =
"HTTP builtin expects one url of type string. Example: HTTP(\"https://clevy.io\")";
pub const ERROR_JWT: &str = "JWT builtin expects payload as argument. Example: JWT({
\"user\": \"name\",
\"somekey\": {
\"somevalue\": 42
},
\"exp\": 1618064023,
\"iss\": \"CSML STUDIO\"
})";
pub const ERROR_SMTP: &str =
"SMTP builtin expects SMTP Server Address. Example: SMTP(\"smtp.gmail.com\")";
pub const ERROR_CRYPTO: &str =
"CRYPTO builtin expects one argument of type string. Example: CRYPTO(\"text\")";
pub const ERROR_BUILTIN_UNKNOWN: &str = "Unknown builtin";
// ### native Components
pub const ERROR_HTTP_NOT_DATA: &str = "bad format: no 'data' in HTTP response";
pub const ERROR_NATIVE_COMPONENT: &str = "native component does not exist";
// ### Constants
pub const ERROR_CONSTANT_MUTABLE_FUNCTION: &str =
    "Invalid operation: constants cannot execute self-mutating functions";
pub const ERROR_INVALID_CONSTANT_EXPR: &str =
    "Invalid constant expression type: constants cannot be assigned this type of expression";
// ### Primitives
// #### Indexing
pub const ERROR_INDEXING: &str =
"indexing can only be done in ARRAY, OBJECT or STRING primitive types";
// #### Closure
pub const ERROR_CLOSURE_UNKNOWN_METHOD: &str = "Closures don't have methods";
// #### Boolean
pub const ERROR_BOOLEAN_UNKNOWN_METHOD: &str = "is not a method of Boolean";
// #### NUMBER
pub const ERROR_NUMBER_POW: &str =
    "[pow] takes one parameter of type int or float. Usage: number.pow(42)";
// #### Float
pub const ERROR_FLOAT_UNKNOWN_METHOD: &str = "is not a method of Float";
// #### Int
pub const ERROR_INT_UNKNOWN_METHOD: &str = "is not a method of Int";
// #### Null
pub const ERROR_NULL_UNKNOWN_METHOD: &str = "is not a method of Null";
// #### String
pub const ERROR_STRING_DO_MATCH: &str =
"[do_match] takes one parameter of type String. Usage: string.do_match(\"tag\")";
pub const ERROR_STRING_APPEND: &str =
"[append] takes one parameter of type String. Usage: string.append(\"text to append\")";
pub const ERROR_STRING_CONTAINS: &str =
"[contains] takes one parameter of type String. Usage: string.contains(\"word\")";
pub const ERROR_STRING_REPLACE: &str =
    "[replace] takes two parameters of type String. Usage: \"this is old\".replace(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_ALL: &str =
    "[replace_all] takes two parameters of type String. Usage: \"old old old old\".replace_all(\"old\", \"new\")";
pub const ERROR_STRING_REPLACE_REGEX: &str =
    "[replace_regex] takes two parameters of type String. Usage: \"hello world\".replace_regex(\"world\", \"Clevy\")";
pub const ERROR_STRING_CONTAINS_REGEX: &str =
"[contains_regex] takes one parameter of type String. Usage: string.contains_regex(\"regex\")";
pub const ERROR_STRING_VALID_REGEX: &str = "parameter must be a valid regex expression"; // link to docs
pub const ERROR_STRING_START_WITH: &str =
"[starts_with] takes one parameter of type String. Usage: string.starts_with(\"tag\")";
pub const ERROR_STRING_START_WITH_REGEX: &str = "[starts_with_regex] takes one parameter of type String. Usage: string.start_with_regex(\"regex\")";
pub const ERROR_STRING_END_WITH: &str =
"[ends_with] takes one parameter of type String. Usage: string.ends_with(\"tag\")";
pub const ERROR_STRING_END_WITH_REGEX: &str =
"[ends_with_regex] takes one parameter of type String. Usage: string.ends_with_regex(\"regex\")";
pub const ERROR_STRING_FROM_JSON: &str = "[from_json] [!] string to object failed";
pub const ERROR_STRING_SPLIT: &str =
"[split] takes one parameter of type String. Usage: string.split(\"separator\")";
pub const ERROR_STRING_MATCH_REGEX: &str =
"[match_regex] takes one parameter of type String. Usage: string.match_regex(\"regex\")";
pub const ERROR_STRING_POW: &str =
"[pow] takes one parameter of type Float or Int. Usage: string.pow(number)";
pub const ERROR_STRING_COS: &str = "[cos] the string must be of numeric type in order to use cos. Verify first with 'string.is_number() == true' ";
pub const ERROR_STRING_NUMERIC: &str = "the string must be of numeric type in order to use this method. Verify first with 'string.is_number() == true' to check it";
pub const ERROR_STRING_RHS: &str = "rhs must be of type string";
pub const ERROR_SLICE_ARG_INT: &str =
".slice(start, optional<end>) args need to be of type Integer";
pub const ERROR_SLICE_ARG_LEN: &str =
    ".slice(start, optional<end>) args need to be less than the string length";
pub const ERROR_SLICE_ARG2: &str =
    ".slice(start, optional<end>) end needs to be greater than start. Example: .slice(2, 5)";
pub const ERROR_STRING_UNKNOWN_METHOD: &str = "is not a method of String";
// #### Array
pub const ERROR_ARRAY_TYPE: &str = "value must be of type array";
pub const ERROR_ARRAY_INDEX_EXIST: &str = "index does not exist";
pub const ERROR_ARRAY_INDEX_TYPE: &str = "index must be of type int";
pub const ERROR_ARRAY_NEGATIVE: &str = "index must be positive. Usage: array[1]";
pub const ERROR_ARRAY_INDEX: &str = "index must be lower than or equal to array.length()";
pub const ERROR_ARRAY_OVERFLOW: &str = "[push] Cannot push inside array, since array limit is ";
pub const ERROR_ARRAY_POP: &str = "[pop] Cannot pop if array is empty";
pub const ERROR_ARRAY_INSERT_AT: &str =
"[insert_at] takes two arguments. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_INSERT_AT_INT: &str =
"[insert_at] first parameter must be of type int. Usage: array.insert_at(1, elem)";
pub const ERROR_ARRAY_REMOVE_AT: &str =
"[remove_at] takes one parameter of type Int. Usage: array.remove_at(1) ";
pub const ERROR_ARRAY_JOIN: &str =
"[join] takes one parameter of type String. Usage: array.join(\"elem\") ";
pub const ERROR_ARRAY_INDEX_OF: &str =
"[index_of] takes one parameter. Usage: array.index_of(elem)";
pub const ERROR_ARRAY_FIND: &str = "[find] takes one parameter. Usage: array.find(elem)";
pub const ERROR_ARRAY_UNKNOWN_METHOD: &str = "is not a method of Array";
// #### CRYPTO OBJECT
// ## HMAC and HASH OBJECT
pub const ERROR_HASH: &str = "Crypto(string) command expects an argument of type String";
pub const ERROR_HASH_ALGO: &str =
    "Invalid Algorithm, supported Algorithms are md5 sha1 sha256 sha384 sha512";
pub const ERROR_HMAC_KEY: &str = "HMAC key needs to be of type string";
pub const ERROR_DIGEST: &str = "Invalid argument, '.digest' is used incorrectly";
pub const ERROR_DIGEST_ALGO: &str =
"Invalid Digest Algorithm, supported Algorithms are hex, base64";
// #### JWT OBJECT
pub const ERROR_JWT_ALGO: &str = "Invalid Algorithm, supported Algorithms are HS256, HS384, HS512";
pub const ERROR_JWT_SECRET: &str = "secret must be of type String";
pub const ERROR_JWT_SIGN_CLAIMS: &str =
    "JWT(claims) command expects argument 'claims' of type Object";
pub const ERROR_JWT_SIGN_ALGO: &str =
    "JWT(claims).sign(algo, secret, Optional<Header>) expects first argument 'algo' of type String";
pub const ERROR_JWT_SIGN_SECRET: &str = "JWT(claims).sign(algo, secret, Optional<Header>) expects second argument 'secret' of type String";
pub const ERROR_JWT_TOKEN: &str = "JWT(jwt) command expects argument 'jwt' of type String";
pub const ERROR_JWT_DECODE_ALGO: &str =
    "JWT(jwt).decode(algo, secret) expects first argument 'algo' of type String";
pub const ERROR_JWT_DECODE_SECRET: &str =
    "JWT(jwt).decode(algo, secret) expects second argument 'secret' of type String";
pub const ERROR_JWT_VALIDATION_CLAIMS: &str =
    "JWT(jwt).verify(claims, algo, secret) expects first argument 'claims' of type Object";
pub const ERROR_JWT_VALIDATION_ALGO: &str =
    "JWT(jwt).verify(claims, algo, secret) expects second argument 'algo' of type String";
pub const ERROR_JWT_VALIDATION_SECRETE: &str =
    "JWT(jwt).verify(claims, algo, secret) expects third argument 'secret' of type String";
// #### HTTP OBJECT
pub const ERROR_HTTP_SET: &str =
"[set] takes one argument of type Object. Usage: HTTP(...).set( {\"key\": 42} )";
pub const ERROR_HTTP_QUERY: &str =
"[query] takes one argument of type Object. Usage: HTTP(...).query( {\"key\": 42} )";
pub const ERROR_HTTP_SEND: &str = "[send] HTTP Object is badly formatted; read the docs for correct usage";
pub const ERROR_HTTP_UNKNOWN_METHOD: &str = "is not a method of HTTP";
// #### OBJECT
pub const ERROR_OBJECT_TYPE: &str = "value must be of type Object";
pub const ERROR_OBJECT_GET: &str = "key does not exist";
pub const ERROR_OBJECT_CONTAINS: &str =
"[contains] takes one argument of type String. Usage: object.contains(\"key\")";
pub const ERROR_OBJECT_GET_GENERICS: &str =
"[get_generics] takes one argument of type String. Usage: object.get_generics(\"key\")";
pub const ERROR_OBJECT_INSERT: &str =
    "[insert] takes two arguments. Usage: object.insert(string, any_type)";
pub const ERROR_OBJECT_ASSIGN: &str =
    "[assign] takes one argument. Usage: object.assign({\"key\": \"value\"})";
pub const ERROR_OBJECT_REMOVE: &str =
"[remove] takes one argument of type String. Usage: object.remove(\"key\")";
pub const ERROR_OBJECT_GET_KEY: &str = "key must be of type String";
pub const ERROR_OBJECT_UNKNOWN_METHOD: &str = "is not a method of Object";
// #### METHODS
pub const ERROR_METHOD_NAMED_ARGS: &str = "arguments in method are not named";
pub const ERROR_OPS: &str = "[!] Ops: Illegal operation";
pub const ERROR_OPS_DIV_INT: &str = "[!] Int: Division by zero";
pub const ERROR_OPS_DIV_FLOAT: &str = "[!] Float: Division by zero";
pub const ERROR_ILLEGAL_OPERATION: &str = "illegal operation:";
pub const OVERFLOWING_OPERATION: &str = "overflowing operation:";
////////////////////////////////////////////////////////////////////////////////
// PRIVATE FUNCTION
////////////////////////////////////////////////////////////////////////////////
fn add_context_to_error_message<'a>(
flow_slice: Span<'a>,
message: String,
line_number: u32,
column: usize,
offset: usize,
) -> String {
use std::fmt::Write;
let mut result = String::new();
let prefix = &flow_slice.fragment().as_bytes()[..offset];
// Find the line that includes the subslice:
// Find the *last* newline before the substring starts
let line_begin = prefix
.iter()
.rev()
.position(|&b| b == b'\n')
.map(|pos| offset - pos)
.unwrap_or(0);
// Find the full line after that newline
let line = flow_slice.fragment()[line_begin..]
.lines()
.next()
.unwrap_or(&flow_slice.fragment()[line_begin..])
.trim_end();
write!(
&mut result,
"at line {line_number},\n\
{line}\n\
{caret:>column$}\n\
{context}\n\n",
line_number = line_number,
context = message,
line = line,
caret = '^',
column = column,
)
// Because `write!` to a `String` is infallible, this `unwrap` is fine.
.unwrap();
result
}
////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTION
////////////////////////////////////////////////////////////////////////////////
pub fn gen_error_info(position: Position, message: String) -> ErrorInfo {
ErrorInfo::new(position, message)
}
pub fn gen_warning_info(position: Position, message: String) -> Warnings {
Warnings { position, message }
}
pub fn gen_nom_error<'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Error(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn | <'a, E>(span: Span<'a>, error: &'static str) -> Err<E>
where
E: ParseError<Span<'a>> + ContextError<Span<'a>>,
{
Err::Failure(E::add_context(
span,
error,
E::from_error_kind(span, ErrorKind::Tag),
))
}
pub fn convert_error_from_span<'a>(flow_slice: Span<'a>, e: CustomError<Span<'a>>) -> String {
let message = e.error.to_owned();
let offset = e.input.location_offset();
// Count the number of newlines in the first `offset` bytes of input
let line_number = e.input.location_line();
// The (1-indexed) column number is the offset of our substring into that line
let column = e.input.get_column();
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn convert_error_from_interval<'a>(
flow_slice: Span<'a>,
message: String,
interval: Interval,
) -> String {
let offset = interval.offset;
// Count the number of newlines in the first `offset` bytes of input
let line_number = interval.start_line;
// The (1-indexed) column number is the offset of our substring into that line
let column = interval.start_column as usize;
add_context_to_error_message(flow_slice, message, line_number, column, offset)
}
pub fn gen_infinite_loop_error_msg(infinite_loop: Vec<(String, String)>) -> String {
infinite_loop
.iter()
.fold(String::new(), |mut acc, (flow, step)| {
acc.push_str(&format!("[flow] {}, [step] {}\n", flow, step));
acc
})
}
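// Example (editor-added, illustrative only): a loop-detection list of
// [("flow_a", "step_1"), ("flow_b", "step_2")] folds into the message
// "[flow] flow_a, [step] step_1\n[flow] flow_b, [step] step_2\n".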
| gen_nom_failure | identifier_name |
types.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgwire
import (
"context"
"encoding/binary"
"fmt"
"math"
"math/big"
"net"
"strconv"
"strings"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/ruiaylin/pgparser/telemetry"
"github.com/ruiaylin/pgparser/lex"
"github.com/ruiaylin/pgparser/pgwire/pgwirebase"
"github.com/ruiaylin/pgparser/ast"
"github.com/ruiaylin/pgparser/sessiondata"
"github.com/ruiaylin/pgparser/sqltelemetry"
"github.com/ruiaylin/pgparser/types"
"github.com/ruiaylin/pgparser/utils/duration"
"github.com/ruiaylin/pgparser/utils/errorutil/unimplemented"
"github.com/ruiaylin/pgparser/utils/ipaddr"
"github.com/ruiaylin/pgparser/utils/log"
"github.com/ruiaylin/pgparser/utils/timeofday"
"github.com/ruiaylin/pgparser/utils/timetz"
"github.com/cockroachdb/errors"
"github.com/lib/pq/oid"
)
// pgType contains type metadata used in RowDescription messages.
type pgType struct {
oid oid.Oid
// Variable-size types have size=-1.
// Note that the protocol has both int16 and int32 size fields,
// so this attribute is an unsized int and should be cast
// as needed.
// This field does *not* correspond to the encoded length of a
// data type, so it's unclear what, if anything, it is used for.
// To get the right value, "SELECT oid, typlen FROM pg_type"
// on a postgres server.
size int
}
func pgTypeForParserType(t *types.T) pgType {
size := -1
if s, variable := ast.DatumTypeSize(t); !variable {
size = int(s)
}
return pgType{
oid: t.Oid(),
size: size,
}
}
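// Editor-added illustration (not in the original file): fixed-width types keep
// their byte size while variable-width ones report -1, so pgTypeForParserType
// would yield something like {oid: 20, size: 8} for an INT8 column and
// {oid: 25, size: -1} for a TEXT column.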
// resolveBlankPaddedChar pads the given string with spaces if blank padding is
// required or returns the string unmodified otherwise.
func resolveBlankPaddedChar(s string, t *types.T) string {
if t.Oid() == oid.T_bpchar {
// Pad spaces on the right of the string to make it of length specified in
// the type t.
return fmt.Sprintf("%-*v", t.Width(), s)
}
return s
}
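// padExample is an editor-added sketch (not part of the original file) showing
// the "%-*v" verb used above: the value is left-justified in a field of the
// given width, which is the blank padding bpchar values need on the wire.
func padExample() string {
// Returns "ab    ": "ab" followed by four spaces to reach width 6.
return fmt.Sprintf("%-*v", 6, "ab")
}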
// writeTextDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings and therefore need padding (chars).
// It is ignored (and can be nil) for types which do not need padding.
func (b *writeBuffer) writeTextDatum(
ctx context.Context, d ast.Datum, conv sessiondata.DataConversionConfig, t *types.T,
) {
if log.V(2) {
log.Infof(ctx, "pgwire writing TEXT datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DBool:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DInt:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendInt(b.putbuf[4:4], int64(*v), 10)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DFloat:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendFloat(b.putbuf[4:4], float64(*v), 'g', conv.GetFloatPrec(), 64)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DDecimal:
b.writeLengthPrefixedDatum(v)
case *ast.DBytes:
result := lex.EncodeByteArrayToRawBytes(
string(*v), conv.BytesEncodeFormat, false /* skipHexPrefix */)
b.putInt32(int32(len(result)))
b.write([]byte(result))
case *ast.DUuid:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := b.putbuf[4 : 4+36]
v.UUID.StringBytes(s)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DIPAddr:
b.writeLengthPrefixedString(v.IPAddr.String())
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DDate:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DTime:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTime(timeofday.TimeOfDay(*v), b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimeTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTimeTZ(v.TimeTZ, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DGeography:
s := v.Geography.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DGeometry:
s := v.Geometry.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DTimestamp:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, nil, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimestampTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, conv.Location, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DInterval:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DJSON:
b.writeLengthPrefixedString(v.JSON.String())
case *ast.DTuple:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DArray:
// Arrays have custom formatting depending on their OID.
b.textFormatter.FormatNode(d)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DOid:
b.writeLengthPrefixedDatum(v)
case *ast.DEnum:
// Enums are serialized with their logical representation.
b.writeLengthPrefixedString(v.LogicalRep)
default:
b.setError(errors.Errorf("unsupported type %T", d))
}
}
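// Editor-added note (illustrative, not from the original source): in the text
// format every non-NULL datum is framed as a big-endian int32 byte length
// followed by its UTF-8 bytes, so a DInt of 42 goes out as 00 00 00 02 '4' '2',
// while SQL NULL is just the length word ff ff ff ff (-1) with no payload.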
// writeBinaryDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings (floats, ints, chars). It is ignored
// (and can be nil) for types with a 1:1 datum:type mapping.
func (b *writeBuffer) writeBinaryDatum(
ctx context.Context, d ast.Datum, sessionLoc *time.Location, t *types.T,
) {
if log.V(2) {
log.Infof(ctx, "pgwire writing BINARY datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
words, lastBitsUsed := v.EncodingParts()
if len(words) == 0 {
b.putInt32(4)
} else {
// Encode the length of the output bytes. It is computed here so we don't
// have to keep a buffer.
// 4: the int32 of the bitLen.
// 8*(len(words)-1): number of 8-byte words except the last one since it's
// partial.
// (lastBitsUsed+7)/8: number of bytes that will be written in the last
// partial word. The /8 rounds down, such that the +7 will cause 1-or-more
// bits to use a byte, but 0 will not.
b.putInt32(4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8))
}
bitLen := v.BitLen()
b.putInt32(int32(bitLen))
var byteBuf [8]byte
for i := 0; i < len(words)-1; i++ {
w := words[i]
binary.BigEndian.PutUint64(byteBuf[:], w)
b.write(byteBuf[:])
}
if len(words) > 0 {
w := words[len(words)-1]
for i := uint(0); i < uint(lastBitsUsed); i += 8 {
c := byte(w >> (56 - i))
b.writeByte(c)
}
}
case *ast.DBool:
b.putInt32(1)
if *v | else {
b.writeByte(0)
}
case *ast.DInt:
switch t.Oid() {
case oid.T_int2:
b.putInt32(2)
b.putInt16(int16(*v))
case oid.T_int4:
b.putInt32(4)
b.putInt32(int32(*v))
case oid.T_int8:
b.putInt32(8)
b.putInt64(int64(*v))
default:
b.setError(errors.Errorf("unsupported int oid: %v", t.Oid()))
}
case *ast.DFloat:
switch t.Oid() {
case oid.T_float4:
b.putInt32(4)
b.putInt32(int32(math.Float32bits(float32(*v))))
case oid.T_float8:
b.putInt32(8)
b.putInt64(int64(math.Float64bits(float64(*v))))
default:
b.setError(errors.Errorf("unsupported float oid: %v", t.Oid()))
}
case *ast.DDecimal:
if v.Form != apd.Finite {
b.putInt32(8)
// 0 digits.
b.putInt32(0)
// https://github.com/postgres/postgres/blob/ffa4cbd623dd69f9fa99e5e92426928a5782cf1a/src/backend/utils/adt/numeric.c#L169
b.write([]byte{0xc0, 0, 0, 0})
if v.Form == apd.Infinite {
// TODO(mjibson): #32489
// The above encoding is not correct for Infinity, but since that encoding
// doesn't exist in postgres, it's unclear what to do. For now use the NaN
// encoding and count it to see if anyone even needs this.
telemetry.Inc(sqltelemetry.BinaryDecimalInfinityCounter)
}
return
}
alloc := struct {
pgNum pgwirebase.PGNumeric
bigI big.Int
}{
pgNum: pgwirebase.PGNumeric{
// Since we use 2000 as the exponent limits in ast.DecimalCtx, this
// conversion should not overflow.
Dscale: int16(-v.Exponent),
},
}
if v.Sign() >= 0 {
alloc.pgNum.Sign = pgwirebase.PGNumericPos
} else {
alloc.pgNum.Sign = pgwirebase.PGNumericNeg
}
isZero := func(r rune) bool {
return r == '0'
}
// Mostly cribbed from libpqtypes' str2num.
digits := strings.TrimLeftFunc(alloc.bigI.Abs(&v.Coeff).String(), isZero)
dweight := len(digits) - int(alloc.pgNum.Dscale) - 1
digits = strings.TrimRightFunc(digits, isZero)
if dweight >= 0 {
alloc.pgNum.Weight = int16((dweight+1+pgwirebase.PGDecDigits-1)/pgwirebase.PGDecDigits - 1)
} else {
alloc.pgNum.Weight = int16(-((-dweight-1)/pgwirebase.PGDecDigits + 1))
}
offset := (int(alloc.pgNum.Weight)+1)*pgwirebase.PGDecDigits - (dweight + 1)
alloc.pgNum.Ndigits = int16((len(digits) + offset + pgwirebase.PGDecDigits - 1) / pgwirebase.PGDecDigits)
if len(digits) == 0 {
offset = 0
alloc.pgNum.Ndigits = 0
alloc.pgNum.Weight = 0
}
digitIdx := -offset
nextDigit := func() int16 {
var ndigit int16
for nextDigitIdx := digitIdx + pgwirebase.PGDecDigits; digitIdx < nextDigitIdx; digitIdx++ {
ndigit *= 10
if digitIdx >= 0 && digitIdx < len(digits) {
ndigit += int16(digits[digitIdx] - '0')
}
}
return ndigit
}
b.putInt32(int32(2 * (4 + alloc.pgNum.Ndigits)))
b.putInt16(alloc.pgNum.Ndigits)
b.putInt16(alloc.pgNum.Weight)
b.putInt16(int16(alloc.pgNum.Sign))
b.putInt16(alloc.pgNum.Dscale)
for digitIdx < len(digits) {
b.putInt16(nextDigit())
}
case *ast.DBytes:
b.putInt32(int32(len(*v)))
b.write([]byte(*v))
case *ast.DUuid:
b.putInt32(16)
b.write(v.GetBytes())
case *ast.DIPAddr:
// We calculate the Postgres binary format for an IPAddr. For the spec see,
// https://github.com/postgres/postgres/blob/81c5e46c490e2426db243eada186995da5bb0ba7/src/backend/utils/adt/network.c#L144
// The pgBinary encoding is as follows:
// The int32 length of the following bytes.
// The family byte.
// The mask size byte.
// A 0 byte for is_cidr. It's ignored on the postgres frontend.
// The length of our IP bytes.
// The IP bytes.
const pgIPAddrBinaryHeaderSize = 4
if v.Family == ipaddr.IPv4family {
b.putInt32(net.IPv4len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv4family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv4len))
err := v.Addr.WriteIPv4Bytes(b)
if err != nil {
b.setError(err)
}
} else if v.Family == ipaddr.IPv6family {
b.putInt32(net.IPv6len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv6family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv6len))
err := v.Addr.WriteIPv6Bytes(b)
if err != nil {
b.setError(err)
}
} else {
b.setError(errors.Errorf("error encoding inet to pgBinary: %v", v.IPAddr))
}
case *ast.DEnum:
b.writeLengthPrefixedString(v.LogicalRep)
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DTimestamp:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, nil))
case *ast.DTimestampTZ:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, sessionLoc))
case *ast.DDate:
b.putInt32(4)
b.putInt32(v.PGEpochDays())
case *ast.DTime:
b.putInt32(8)
b.putInt64(int64(*v))
case *ast.DTimeTZ:
b.putInt32(12)
b.putInt64(int64(v.TimeOfDay))
b.putInt32(v.OffsetSecs)
case *ast.DInterval:
b.putInt32(16)
b.putInt64(v.Nanos() / int64(time.Microsecond/time.Nanosecond))
b.putInt32(int32(v.Days))
b.putInt32(int32(v.Months))
case *ast.DTuple:
// TODO(andrei): We shouldn't be allocating a new buffer for every array.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of datums.
subWriter.putInt32(int32(len(v.D)))
for _, elem := range v.D {
oid := elem.ResolvedType().Oid()
subWriter.putInt32(int32(oid))
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, elem.ResolvedType())
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DGeography:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DGeometry:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DArray:
if v.ParamTyp.Family() == types.ArrayFamily {
b.setError(unimplemented.NewWithIssueDetail(32552,
"binenc", "unsupported binary serialization of multidimensional arrays"))
return
}
// TODO(andrei): We shouldn't be allocating a new buffer for every array.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of dimensions. We currently support 1d arrays only.
var ndims int32 = 1
if v.Len() == 0 {
ndims = 0
}
subWriter.putInt32(ndims)
hasNulls := 0
if v.HasNulls {
hasNulls = 1
}
oid := v.ParamTyp.Oid()
subWriter.putInt32(int32(hasNulls))
subWriter.putInt32(int32(oid))
if v.Len() > 0 {
subWriter.putInt32(int32(v.Len()))
// Lower bound, we only support a lower bound of 1.
subWriter.putInt32(1)
for _, elem := range v.Array {
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, v.ParamTyp)
}
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DJSON:
s := v.JSON.String()
b.putInt32(int32(len(s) + 1))
// Postgres version number, as of writing, `1` is the only valid value.
b.writeByte(1)
b.writeString(s)
case *ast.DOid:
b.putInt32(4)
b.putInt32(int32(v.DInt))
default:
b.setError(errors.AssertionFailedf("unsupported type %T", d))
}
}
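// Worked example (editor-added sketch) for the DDecimal branch above, assuming
// PGDecDigits == 4 (four decimal digits per int16 group): encoding 123.45 gives
// digits  = "12345", dscale = 2, dweight = 5 - 2 - 1 = 2
// weight  = 0 (the most significant base-10000 group is the units group)
// ndigits = 2, with base-10000 groups 123 and 4500
// so the 12-byte payload is ndigits=2, weight=0, sign=positive, dscale=2
// followed by the int16 digits 123 and 4500, i.e. 123 + 4500/10000 = 123.45.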
const (
pgTimeFormat = "15:04:05.999999"
pgTimeTZFormat = pgTimeFormat + "-07:00"
pgDateFormat = "2006-01-02"
pgTimeStampFormatNoOffset = pgDateFormat + " " + pgTimeFormat
pgTimeStampFormat = pgTimeStampFormatNoOffset + "-07:00"
pgTime2400Format = "24:00:00"
)
// formatTime formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
func formatTime(t timeofday.TimeOfDay, tmp []byte) []byte {
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t == timeofday.Time2400 {
return []byte(pgTime2400Format)
}
return t.ToTime().AppendFormat(tmp, pgTimeFormat)
}
// formatTimeTZ formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
// Note it does not understand the "second" component of the offset as lib/pq
// cannot parse it.
func formatTimeTZ(t timetz.TimeTZ, tmp []byte) []byte {
ret := t.ToTime().AppendFormat(tmp, pgTimeTZFormat)
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t.TimeOfDay == timeofday.Time2400 {
// It instead reads 00:00:00. Replace that text.
var newRet []byte
newRet = append(newRet, pgTime2400Format...)
newRet = append(newRet, ret[len(pgTime2400Format):]...)
ret = newRet
}
return ret
}
func formatTs(t time.Time, offset *time.Location, tmp []byte) (b []byte) {
var format string
if offset != nil {
format = pgTimeStampFormat
} else {
format = pgTimeStampFormatNoOffset
}
return formatTsWithFormat(format, t, offset, tmp)
}
// formatTsWithFormat formats t with an optional offset into a format
// lib/pq understands, appending to the provided tmp buffer and
// reallocating if needed. The function will then return the resulting
// buffer. formatTsWithFormat is mostly cribbed from github.com/lib/pq.
func formatTsWithFormat(format string, t time.Time, offset *time.Location, tmp []byte) (b []byte) {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
if offset != nil {
t = t.In(offset)
}
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b = t.AppendFormat(tmp, format)
if bc {
b = append(b, " BC"...)
}
return b
}
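// Illustrative example (editor-added): Go encodes 2 BC as year -1, so the
// AddDate((-t.Year())*2+1, 0, 0) flip above turns year -1 into year 2 before
// formatting, and a year -1 March 14 timestamp renders as "0002-03-14 00:00:00 BC".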
// timeToPgBinary calculates the Postgres binary format for a timestamp. The timestamp
// is represented as the number of microseconds between the given time and Jan 1, 2000
// (dubbed the PGEpochJDate), stored within an int64.
func timeToPgBinary(t time.Time, offset *time.Location) int64 {
if offset != nil {
t = t.In(offset)
} else {
t = t.UTC()
}
return duration.DiffMicros(t, pgwirebase.PGEpochJDate)
}
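// Example (editor-added, illustrative): with PGEpochJDate at 2000-01-01
// 00:00:00 UTC, a timestamp of 2000-01-01 00:00:01 UTC is one second past the
// epoch and therefore encodes as 1,000,000 microseconds.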
| {
b.writeByte(1)
} | conditional_block |
types.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgwire
import (
"context"
"encoding/binary"
"fmt"
"math"
"math/big"
"net"
"strconv"
"strings"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/ruiaylin/pgparser/telemetry"
"github.com/ruiaylin/pgparser/lex"
"github.com/ruiaylin/pgparser/pgwire/pgwirebase"
"github.com/ruiaylin/pgparser/ast"
"github.com/ruiaylin/pgparser/sessiondata"
"github.com/ruiaylin/pgparser/sqltelemetry"
"github.com/ruiaylin/pgparser/types"
"github.com/ruiaylin/pgparser/utils/duration"
"github.com/ruiaylin/pgparser/utils/errorutil/unimplemented"
"github.com/ruiaylin/pgparser/utils/ipaddr"
"github.com/ruiaylin/pgparser/utils/log"
"github.com/ruiaylin/pgparser/utils/timeofday"
"github.com/ruiaylin/pgparser/utils/timetz"
"github.com/cockroachdb/errors"
"github.com/lib/pq/oid"
)
// pgType contains type metadata used in RowDescription messages.
type pgType struct {
oid oid.Oid
// Variable-size types have size=-1.
// Note that the protocol has both int16 and int32 size fields,
// so this attribute is an unsized int and should be cast
// as needed.
// This field does *not* correspond to the encoded length of a
// data type, so it's unclear what, if anything, it is used for.
// To get the right value, "SELECT oid, typlen FROM pg_type"
// on a postgres server.
size int
}
func pgTypeForParserType(t *types.T) pgType {
size := -1
if s, variable := ast.DatumTypeSize(t); !variable {
size = int(s)
}
return pgType{
oid: t.Oid(),
size: size,
}
}
// resolveBlankPaddedChar pads the given string with spaces if blank padding is
// required or returns the string unmodified otherwise.
func resolveBlankPaddedChar(s string, t *types.T) string {
if t.Oid() == oid.T_bpchar {
// Pad spaces on the right of the string to make it of length specified in
// the type t.
return fmt.Sprintf("%-*v", t.Width(), s)
}
return s
}
// writeTextDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings and therefore need padding (chars).
// It is ignored (and can be nil) for types which do not need padding.
func (b *writeBuffer) writeTextDatum(
ctx context.Context, d ast.Datum, conv sessiondata.DataConversionConfig, t *types.T,
) |
// writeBinaryDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings (floats, ints, chars). It is ignored
// (and can be nil) for types with a 1:1 datum:type mapping.
func (b *writeBuffer) writeBinaryDatum(
ctx context.Context, d ast.Datum, sessionLoc *time.Location, t *types.T,
) {
if log.V(2) {
log.Infof(ctx, "pgwire writing BINARY datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
words, lastBitsUsed := v.EncodingParts()
if len(words) == 0 {
b.putInt32(4)
} else {
// Encode the length of the output bytes. It is computed here so we don't
// have to keep a buffer.
// 4: the int32 of the bitLen.
// 8*(len(words)-1): number of 8-byte words except the last one since it's
// partial.
// (lastBitsUsed+7)/8: number of bytes that will be written in the last
// partial word. The /8 rounds down, such that the +7 will cause 1-or-more
// bits to use a byte, but 0 will not.
b.putInt32(4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8))
}
bitLen := v.BitLen()
b.putInt32(int32(bitLen))
var byteBuf [8]byte
for i := 0; i < len(words)-1; i++ {
w := words[i]
binary.BigEndian.PutUint64(byteBuf[:], w)
b.write(byteBuf[:])
}
if len(words) > 0 {
w := words[len(words)-1]
for i := uint(0); i < uint(lastBitsUsed); i += 8 {
c := byte(w >> (56 - i))
b.writeByte(c)
}
}
case *ast.DBool:
b.putInt32(1)
if *v {
b.writeByte(1)
} else {
b.writeByte(0)
}
case *ast.DInt:
switch t.Oid() {
case oid.T_int2:
b.putInt32(2)
b.putInt16(int16(*v))
case oid.T_int4:
b.putInt32(4)
b.putInt32(int32(*v))
case oid.T_int8:
b.putInt32(8)
b.putInt64(int64(*v))
default:
b.setError(errors.Errorf("unsupported int oid: %v", t.Oid()))
}
case *ast.DFloat:
switch t.Oid() {
case oid.T_float4:
b.putInt32(4)
b.putInt32(int32(math.Float32bits(float32(*v))))
case oid.T_float8:
b.putInt32(8)
b.putInt64(int64(math.Float64bits(float64(*v))))
default:
b.setError(errors.Errorf("unsupported float oid: %v", t.Oid()))
}
case *ast.DDecimal:
if v.Form != apd.Finite {
b.putInt32(8)
// 0 digits.
b.putInt32(0)
// https://github.com/postgres/postgres/blob/ffa4cbd623dd69f9fa99e5e92426928a5782cf1a/src/backend/utils/adt/numeric.c#L169
b.write([]byte{0xc0, 0, 0, 0})
if v.Form == apd.Infinite {
// TODO(mjibson): #32489
// The above encoding is not correct for Infinity, but since that encoding
// doesn't exist in postgres, it's unclear what to do. For now use the NaN
// encoding and count it to see if anyone even needs this.
telemetry.Inc(sqltelemetry.BinaryDecimalInfinityCounter)
}
return
}
alloc := struct {
pgNum pgwirebase.PGNumeric
bigI big.Int
}{
pgNum: pgwirebase.PGNumeric{
// Since we use 2000 as the exponent limits in ast.DecimalCtx, this
// conversion should not overflow.
Dscale: int16(-v.Exponent),
},
}
if v.Sign() >= 0 {
alloc.pgNum.Sign = pgwirebase.PGNumericPos
} else {
alloc.pgNum.Sign = pgwirebase.PGNumericNeg
}
isZero := func(r rune) bool {
return r == '0'
}
// Mostly cribbed from libpqtypes' str2num.
digits := strings.TrimLeftFunc(alloc.bigI.Abs(&v.Coeff).String(), isZero)
dweight := len(digits) - int(alloc.pgNum.Dscale) - 1
digits = strings.TrimRightFunc(digits, isZero)
if dweight >= 0 {
alloc.pgNum.Weight = int16((dweight+1+pgwirebase.PGDecDigits-1)/pgwirebase.PGDecDigits - 1)
} else {
alloc.pgNum.Weight = int16(-((-dweight-1)/pgwirebase.PGDecDigits + 1))
}
offset := (int(alloc.pgNum.Weight)+1)*pgwirebase.PGDecDigits - (dweight + 1)
alloc.pgNum.Ndigits = int16((len(digits) + offset + pgwirebase.PGDecDigits - 1) / pgwirebase.PGDecDigits)
if len(digits) == 0 {
offset = 0
alloc.pgNum.Ndigits = 0
alloc.pgNum.Weight = 0
}
digitIdx := -offset
nextDigit := func() int16 {
var ndigit int16
for nextDigitIdx := digitIdx + pgwirebase.PGDecDigits; digitIdx < nextDigitIdx; digitIdx++ {
ndigit *= 10
if digitIdx >= 0 && digitIdx < len(digits) {
ndigit += int16(digits[digitIdx] - '0')
}
}
return ndigit
}
b.putInt32(int32(2 * (4 + alloc.pgNum.Ndigits)))
b.putInt16(alloc.pgNum.Ndigits)
b.putInt16(alloc.pgNum.Weight)
b.putInt16(int16(alloc.pgNum.Sign))
b.putInt16(alloc.pgNum.Dscale)
for digitIdx < len(digits) {
b.putInt16(nextDigit())
}
case *ast.DBytes:
b.putInt32(int32(len(*v)))
b.write([]byte(*v))
case *ast.DUuid:
b.putInt32(16)
b.write(v.GetBytes())
case *ast.DIPAddr:
// We calculate the Postgres binary format for an IPAddr. For the spec see,
// https://github.com/postgres/postgres/blob/81c5e46c490e2426db243eada186995da5bb0ba7/src/backend/utils/adt/network.c#L144
// The pgBinary encoding is as follows:
// The int32 length of the following bytes.
// The family byte.
// The mask size byte.
// A 0 byte for is_cidr. It's ignored on the postgres frontend.
// The length of our IP bytes.
// The IP bytes.
const pgIPAddrBinaryHeaderSize = 4
if v.Family == ipaddr.IPv4family {
b.putInt32(net.IPv4len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv4family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv4len))
err := v.Addr.WriteIPv4Bytes(b)
if err != nil {
b.setError(err)
}
} else if v.Family == ipaddr.IPv6family {
b.putInt32(net.IPv6len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv6family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv6len))
err := v.Addr.WriteIPv6Bytes(b)
if err != nil {
b.setError(err)
}
} else {
b.setError(errors.Errorf("error encoding inet to pgBinary: %v", v.IPAddr))
}
case *ast.DEnum:
b.writeLengthPrefixedString(v.LogicalRep)
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DTimestamp:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, nil))
case *ast.DTimestampTZ:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, sessionLoc))
case *ast.DDate:
b.putInt32(4)
b.putInt32(v.PGEpochDays())
case *ast.DTime:
b.putInt32(8)
b.putInt64(int64(*v))
case *ast.DTimeTZ:
b.putInt32(12)
b.putInt64(int64(v.TimeOfDay))
b.putInt32(v.OffsetSecs)
case *ast.DInterval:
b.putInt32(16)
b.putInt64(v.Nanos() / int64(time.Microsecond/time.Nanosecond))
b.putInt32(int32(v.Days))
b.putInt32(int32(v.Months))
case *ast.DTuple:
// TODO(andrei): We shouldn't be allocating a new buffer for every array.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of datums.
subWriter.putInt32(int32(len(v.D)))
for _, elem := range v.D {
oid := elem.ResolvedType().Oid()
subWriter.putInt32(int32(oid))
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, elem.ResolvedType())
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DGeography:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DGeometry:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DArray:
if v.ParamTyp.Family() == types.ArrayFamily {
b.setError(unimplemented.NewWithIssueDetail(32552,
"binenc", "unsupported binary serialization of multidimensional arrays"))
return
}
// TODO(andrei): We shouldn't be allocating a new buffer for every array.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of dimensions. We currently support 1d arrays only.
var ndims int32 = 1
if v.Len() == 0 {
ndims = 0
}
subWriter.putInt32(ndims)
hasNulls := 0
if v.HasNulls {
hasNulls = 1
}
oid := v.ParamTyp.Oid()
subWriter.putInt32(int32(hasNulls))
subWriter.putInt32(int32(oid))
if v.Len() > 0 {
subWriter.putInt32(int32(v.Len()))
// Lower bound, we only support a lower bound of 1.
subWriter.putInt32(1)
for _, elem := range v.Array {
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, v.ParamTyp)
}
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DJSON:
s := v.JSON.String()
b.putInt32(int32(len(s) + 1))
// Postgres version number, as of writing, `1` is the only valid value.
b.writeByte(1)
b.writeString(s)
case *ast.DOid:
b.putInt32(4)
b.putInt32(int32(v.DInt))
default:
b.setError(errors.AssertionFailedf("unsupported type %T", d))
}
}
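// Worked example (editor-added, illustrative only) for the DBitArray branch
// above: a 12-bit value fits in one word with lastBitsUsed == 12, so the length
// prefix is 4 + 8*0 + (12+7)/8 = 6 — the int32 bit length (12) followed by two
// data bytes taken from the top of the word.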
const (
pgTimeFormat = "15:04:05.999999"
pgTimeTZFormat = pgTimeFormat + "-07:00"
pgDateFormat = "2006-01-02"
pgTimeStampFormatNoOffset = pgDateFormat + " " + pgTimeFormat
pgTimeStampFormat = pgTimeStampFormatNoOffset + "-07:00"
pgTime2400Format = "24:00:00"
)
// formatTime formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
func formatTime(t timeofday.TimeOfDay, tmp []byte) []byte {
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t == timeofday.Time2400 {
return []byte(pgTime2400Format)
}
return t.ToTime().AppendFormat(tmp, pgTimeFormat)
}
// formatTimeTZ formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
// Note it does not understand the "second" component of the offset as lib/pq
// cannot parse it.
func formatTimeTZ(t timetz.TimeTZ, tmp []byte) []byte {
ret := t.ToTime().AppendFormat(tmp, pgTimeTZFormat)
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t.TimeOfDay == timeofday.Time2400 {
// It instead reads 00:00:00. Replace that text.
var newRet []byte
newRet = append(newRet, pgTime2400Format...)
newRet = append(newRet, ret[len(pgTime2400Format):]...)
ret = newRet
}
return ret
}
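// Editor-added note (illustrative): timeofday.Time2400 comes back from
// ToTime() as 00:00:00, so the splice above keeps the formatted zone offset but
// swaps in the literal "24:00:00", e.g. "00:00:00+00:00" becomes "24:00:00+00:00".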
func formatTs(t time.Time, offset *time.Location, tmp []byte) (b []byte) {
var format string
if offset != nil {
format = pgTimeStampFormat
} else {
format = pgTimeStampFormatNoOffset
}
return formatTsWithFormat(format, t, offset, tmp)
}
// formatTsWithFormat formats t with an optional offset into a format
// lib/pq understands, appending to the provided tmp buffer and
// reallocating if needed. The function will then return the resulting
// buffer. formatTsWithFormat is mostly cribbed from github.com/lib/pq.
func formatTsWithFormat(format string, t time.Time, offset *time.Location, tmp []byte) (b []byte) {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
if offset != nil {
t = t.In(offset)
}
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b = t.AppendFormat(tmp, format)
if bc {
b = append(b, " BC"...)
}
return b
}
// timeToPgBinary calculates the Postgres binary format for a timestamp. The timestamp
// is represented as the number of microseconds between the given time and Jan 1, 2000
// (dubbed the PGEpochJDate), stored within an int64.
func timeToPgBinary(t time.Time, offset *time.Location) int64 {
if offset != nil {
t = t.In(offset)
} else {
t = t.UTC()
}
return duration.DiffMicros(t, pgwirebase.PGEpochJDate)
}
| {
if log.V(2) {
log.Infof(ctx, "pgwire writing TEXT datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DBool:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DInt:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendInt(b.putbuf[4:4], int64(*v), 10)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DFloat:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendFloat(b.putbuf[4:4], float64(*v), 'g', conv.GetFloatPrec(), 64)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DDecimal:
b.writeLengthPrefixedDatum(v)
case *ast.DBytes:
result := lex.EncodeByteArrayToRawBytes(
string(*v), conv.BytesEncodeFormat, false /* skipHexPrefix */)
b.putInt32(int32(len(result)))
b.write([]byte(result))
case *ast.DUuid:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := b.putbuf[4 : 4+36]
v.UUID.StringBytes(s)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DIPAddr:
b.writeLengthPrefixedString(v.IPAddr.String())
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DDate:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DTime:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTime(timeofday.TimeOfDay(*v), b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimeTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTimeTZ(v.TimeTZ, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DGeography:
s := v.Geography.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DGeometry:
s := v.Geometry.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DTimestamp:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, nil, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimestampTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, conv.Location, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DInterval:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DJSON:
b.writeLengthPrefixedString(v.JSON.String())
case *ast.DTuple:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DArray:
// Arrays have custom formatting depending on their OID.
b.textFormatter.FormatNode(d)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DOid:
b.writeLengthPrefixedDatum(v)
case *ast.DEnum:
// Enums are serialized with their logical representation.
b.writeLengthPrefixedString(v.LogicalRep)
default:
b.setError(errors.Errorf("unsupported type %T", d))
}
} | identifier_body |
types.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgwire
import (
"context"
"encoding/binary"
"fmt"
"math"
"math/big"
"net"
"strconv"
"strings"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/ruiaylin/pgparser/telemetry"
"github.com/ruiaylin/pgparser/lex"
"github.com/ruiaylin/pgparser/pgwire/pgwirebase"
"github.com/ruiaylin/pgparser/ast"
"github.com/ruiaylin/pgparser/sessiondata"
"github.com/ruiaylin/pgparser/sqltelemetry"
"github.com/ruiaylin/pgparser/types"
"github.com/ruiaylin/pgparser/utils/duration"
"github.com/ruiaylin/pgparser/utils/errorutil/unimplemented"
"github.com/ruiaylin/pgparser/utils/ipaddr"
"github.com/ruiaylin/pgparser/utils/log"
"github.com/ruiaylin/pgparser/utils/timeofday"
"github.com/ruiaylin/pgparser/utils/timetz"
"github.com/cockroachdb/errors"
"github.com/lib/pq/oid" | oid oid.Oid
// Variable-size types have size=-1.
// Note that the protocol has both int16 and int32 size fields,
// so this attribute is an unsized int and should be cast
// as needed.
// This field does *not* correspond to the encoded length of a
// data type, so it's unclear what, if anything, it is used for.
// To get the right value, "SELECT oid, typlen FROM pg_type"
// on a postgres server.
size int
}
func pgTypeForParserType(t *types.T) pgType {
size := -1
if s, variable := ast.DatumTypeSize(t); !variable {
size = int(s)
}
return pgType{
oid: t.Oid(),
size: size,
}
}
// resolveBlankPaddedChar pads the given string with spaces if blank padding is
// required or returns the string unmodified otherwise.
func resolveBlankPaddedChar(s string, t *types.T) string {
if t.Oid() == oid.T_bpchar {
// Pad spaces on the right of the string to make it of length specified in
// the type t.
return fmt.Sprintf("%-*v", t.Width(), s)
}
return s
}
// writeTextDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings and therefore need padding (chars).
// It is ignored (and can be nil) for types which do not need padding.
func (b *writeBuffer) writeTextDatum(
ctx context.Context, d ast.Datum, conv sessiondata.DataConversionConfig, t *types.T,
) {
if log.V(2) {
log.Infof(ctx, "pgwire writing TEXT datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DBool:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DInt:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendInt(b.putbuf[4:4], int64(*v), 10)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DFloat:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendFloat(b.putbuf[4:4], float64(*v), 'g', conv.GetFloatPrec(), 64)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DDecimal:
b.writeLengthPrefixedDatum(v)
case *ast.DBytes:
result := lex.EncodeByteArrayToRawBytes(
string(*v), conv.BytesEncodeFormat, false /* skipHexPrefix */)
b.putInt32(int32(len(result)))
b.write([]byte(result))
case *ast.DUuid:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := b.putbuf[4 : 4+36]
v.UUID.StringBytes(s)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DIPAddr:
b.writeLengthPrefixedString(v.IPAddr.String())
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DDate:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DTime:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTime(timeofday.TimeOfDay(*v), b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimeTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTimeTZ(v.TimeTZ, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DGeography:
s := v.Geography.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DGeometry:
s := v.Geometry.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DTimestamp:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, nil, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimestampTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, conv.Location, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DInterval:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DJSON:
b.writeLengthPrefixedString(v.JSON.String())
case *ast.DTuple:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DArray:
// Arrays have custom formatting depending on their OID.
b.textFormatter.FormatNode(d)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DOid:
b.writeLengthPrefixedDatum(v)
case *ast.DEnum:
// Enums are serialized with their logical representation.
b.writeLengthPrefixedString(v.LogicalRep)
default:
b.setError(errors.Errorf("unsupported type %T", d))
}
}
// writeBinaryDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings (floats, ints, chars). It is ignored
// (and can be nil) for types with a 1:1 datum:type mapping.
func (b *writeBuffer) writeBinaryDatum(
ctx context.Context, d ast.Datum, sessionLoc *time.Location, t *types.T,
) {
if log.V(2) {
log.Infof(ctx, "pgwire writing BINARY datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
words, lastBitsUsed := v.EncodingParts()
if len(words) == 0 {
b.putInt32(4)
} else {
// Encode the length of the output bytes. It is computed here so we don't
// have to keep a buffer.
// 4: the int32 of the bitLen.
// 8*(len(words)-1): number of 8-byte words except the last one since it's
// partial.
// (lastBitsUsed+7)/8: number of bytes that will be written in the last
// partial word. The /8 rounds down, such that the +7 will cause 1-or-more
// bits to use a byte, but 0 will not.
b.putInt32(4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8))
}
bitLen := v.BitLen()
b.putInt32(int32(bitLen))
var byteBuf [8]byte
for i := 0; i < len(words)-1; i++ {
w := words[i]
binary.BigEndian.PutUint64(byteBuf[:], w)
b.write(byteBuf[:])
}
if len(words) > 0 {
w := words[len(words)-1]
for i := uint(0); i < uint(lastBitsUsed); i += 8 {
c := byte(w >> (56 - i))
b.writeByte(c)
}
}
case *ast.DBool:
b.putInt32(1)
if *v {
b.writeByte(1)
} else {
b.writeByte(0)
}
case *ast.DInt:
switch t.Oid() {
case oid.T_int2:
b.putInt32(2)
b.putInt16(int16(*v))
case oid.T_int4:
b.putInt32(4)
b.putInt32(int32(*v))
case oid.T_int8:
b.putInt32(8)
b.putInt64(int64(*v))
default:
b.setError(errors.Errorf("unsupported int oid: %v", t.Oid()))
}
case *ast.DFloat:
switch t.Oid() {
case oid.T_float4:
b.putInt32(4)
b.putInt32(int32(math.Float32bits(float32(*v))))
case oid.T_float8:
b.putInt32(8)
b.putInt64(int64(math.Float64bits(float64(*v))))
default:
b.setError(errors.Errorf("unsupported float oid: %v", t.Oid()))
}
case *ast.DDecimal:
if v.Form != apd.Finite {
b.putInt32(8)
// 0 digits.
b.putInt32(0)
// https://github.com/postgres/postgres/blob/ffa4cbd623dd69f9fa99e5e92426928a5782cf1a/src/backend/utils/adt/numeric.c#L169
b.write([]byte{0xc0, 0, 0, 0})
if v.Form == apd.Infinite {
// TODO(mjibson): #32489
// The above encoding is not correct for Infinity, but since that encoding
// doesn't exist in postgres, it's unclear what to do. For now use the NaN
// encoding and count it to see if anyone even needs this.
telemetry.Inc(sqltelemetry.BinaryDecimalInfinityCounter)
}
return
}
alloc := struct {
pgNum pgwirebase.PGNumeric
bigI big.Int
}{
pgNum: pgwirebase.PGNumeric{
// Since we use 2000 as the exponent limits in ast.DecimalCtx, this
// conversion should not overflow.
Dscale: int16(-v.Exponent),
},
}
if v.Sign() >= 0 {
alloc.pgNum.Sign = pgwirebase.PGNumericPos
} else {
alloc.pgNum.Sign = pgwirebase.PGNumericNeg
}
isZero := func(r rune) bool {
return r == '0'
}
// Mostly cribbed from libpqtypes' str2num.
digits := strings.TrimLeftFunc(alloc.bigI.Abs(&v.Coeff).String(), isZero)
dweight := len(digits) - int(alloc.pgNum.Dscale) - 1
digits = strings.TrimRightFunc(digits, isZero)
if dweight >= 0 {
alloc.pgNum.Weight = int16((dweight+1+pgwirebase.PGDecDigits-1)/pgwirebase.PGDecDigits - 1)
} else {
alloc.pgNum.Weight = int16(-((-dweight-1)/pgwirebase.PGDecDigits + 1))
}
offset := (int(alloc.pgNum.Weight)+1)*pgwirebase.PGDecDigits - (dweight + 1)
alloc.pgNum.Ndigits = int16((len(digits) + offset + pgwirebase.PGDecDigits - 1) / pgwirebase.PGDecDigits)
if len(digits) == 0 {
offset = 0
alloc.pgNum.Ndigits = 0
alloc.pgNum.Weight = 0
}
digitIdx := -offset
nextDigit := func() int16 {
var ndigit int16
for nextDigitIdx := digitIdx + pgwirebase.PGDecDigits; digitIdx < nextDigitIdx; digitIdx++ {
ndigit *= 10
if digitIdx >= 0 && digitIdx < len(digits) {
ndigit += int16(digits[digitIdx] - '0')
}
}
return ndigit
}
b.putInt32(int32(2 * (4 + alloc.pgNum.Ndigits)))
b.putInt16(alloc.pgNum.Ndigits)
b.putInt16(alloc.pgNum.Weight)
b.putInt16(int16(alloc.pgNum.Sign))
b.putInt16(alloc.pgNum.Dscale)
for digitIdx < len(digits) {
b.putInt16(nextDigit())
}
case *ast.DBytes:
b.putInt32(int32(len(*v)))
b.write([]byte(*v))
case *ast.DUuid:
b.putInt32(16)
b.write(v.GetBytes())
case *ast.DIPAddr:
// We calculate the Postgres binary format for an IPAddr. For the spec see,
// https://github.com/postgres/postgres/blob/81c5e46c490e2426db243eada186995da5bb0ba7/src/backend/utils/adt/network.c#L144
// The pgBinary encoding is as follows:
// The int32 length of the following bytes.
// The family byte.
// The mask size byte.
// A 0 byte for is_cidr. It's ignored on the postgres frontend.
// The length of our IP bytes.
// The IP bytes.
const pgIPAddrBinaryHeaderSize = 4
if v.Family == ipaddr.IPv4family {
b.putInt32(net.IPv4len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv4family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv4len))
err := v.Addr.WriteIPv4Bytes(b)
if err != nil {
b.setError(err)
}
} else if v.Family == ipaddr.IPv6family {
b.putInt32(net.IPv6len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv6family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv6len))
err := v.Addr.WriteIPv6Bytes(b)
if err != nil {
b.setError(err)
}
} else {
b.setError(errors.Errorf("error encoding inet to pgBinary: %v", v.IPAddr))
}
case *ast.DEnum:
b.writeLengthPrefixedString(v.LogicalRep)
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DTimestamp:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, nil))
case *ast.DTimestampTZ:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, sessionLoc))
case *ast.DDate:
b.putInt32(4)
b.putInt32(v.PGEpochDays())
case *ast.DTime:
b.putInt32(8)
b.putInt64(int64(*v))
case *ast.DTimeTZ:
b.putInt32(12)
b.putInt64(int64(v.TimeOfDay))
b.putInt32(v.OffsetSecs)
case *ast.DInterval:
b.putInt32(16)
b.putInt64(v.Nanos() / int64(time.Microsecond/time.Nanosecond))
b.putInt32(int32(v.Days))
b.putInt32(int32(v.Months))
case *ast.DTuple:
// TODO(andrei): We shouldn't be allocating a new buffer for every array.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of datums.
subWriter.putInt32(int32(len(v.D)))
for _, elem := range v.D {
oid := elem.ResolvedType().Oid()
subWriter.putInt32(int32(oid))
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, elem.ResolvedType())
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DGeography:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DGeometry:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DArray:
if v.ParamTyp.Family() == types.ArrayFamily {
b.setError(unimplemented.NewWithIssueDetail(32552,
"binenc", "unsupported binary serialization of multidimensional arrays"))
return
}
// TODO(andrei): We shouldn't be allocating a new buffer for every array.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of dimensions. We currently support 1d arrays only.
var ndims int32 = 1
if v.Len() == 0 {
ndims = 0
}
subWriter.putInt32(ndims)
hasNulls := 0
if v.HasNulls {
hasNulls = 1
}
oid := v.ParamTyp.Oid()
subWriter.putInt32(int32(hasNulls))
subWriter.putInt32(int32(oid))
if v.Len() > 0 {
subWriter.putInt32(int32(v.Len()))
// Lower bound, we only support a lower bound of 1.
subWriter.putInt32(1)
for _, elem := range v.Array {
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, v.ParamTyp)
}
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DJSON:
s := v.JSON.String()
b.putInt32(int32(len(s) + 1))
// Postgres version number, as of writing, `1` is the only valid value.
b.writeByte(1)
b.writeString(s)
case *ast.DOid:
b.putInt32(4)
b.putInt32(int32(v.DInt))
default:
b.setError(errors.AssertionFailedf("unsupported type %T", d))
}
}
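// Worked example (editor-added sketch) for the DInterval branch above: the
// binary form is a fixed 16-byte payload of int64 microseconds, int32 days and
// int32 months, so an interval of 1 day 2 hours (hours held in the nanosecond
// part, the day in Days) is sent as (7200000000, 1, 0).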
const (
pgTimeFormat = "15:04:05.999999"
pgTimeTZFormat = pgTimeFormat + "-07:00"
pgDateFormat = "2006-01-02"
pgTimeStampFormatNoOffset = pgDateFormat + " " + pgTimeFormat
pgTimeStampFormat = pgTimeStampFormatNoOffset + "-07:00"
pgTime2400Format = "24:00:00"
)
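// Editor-added note (illustrative): these layouts use Go's reference time
// "2006-01-02 15:04:05", and the ".999999" fraction keeps up to six digits
// while trimming trailing zeros, so 2021-03-14 01:59:26.535 UTC renders as
// "2021-03-14 01:59:26.535" under pgTimeStampFormatNoOffset.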
// formatTime formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
func formatTime(t timeofday.TimeOfDay, tmp []byte) []byte {
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t == timeofday.Time2400 {
return []byte(pgTime2400Format)
}
return t.ToTime().AppendFormat(tmp, pgTimeFormat)
}
// formatTimeTZ formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
// Note it does not understand the "second" component of the offset as lib/pq
// cannot parse it.
func formatTimeTZ(t timetz.TimeTZ, tmp []byte) []byte {
ret := t.ToTime().AppendFormat(tmp, pgTimeTZFormat)
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t.TimeOfDay == timeofday.Time2400 {
// It instead reads 00:00:00. Replace that text.
var newRet []byte
newRet = append(newRet, pgTime2400Format...)
newRet = append(newRet, ret[len(pgTime2400Format):]...)
ret = newRet
}
return ret
}
func formatTs(t time.Time, offset *time.Location, tmp []byte) (b []byte) {
var format string
if offset != nil {
format = pgTimeStampFormat
} else {
format = pgTimeStampFormatNoOffset
}
return formatTsWithFormat(format, t, offset, tmp)
}
// formatTsWithFormat formats t with an optional offset into a format
// lib/pq understands, appending to the provided tmp buffer and
// reallocating if needed. The function will then return the resulting
// buffer. formatTsWithFormat is mostly cribbed from github.com/lib/pq.
func formatTsWithFormat(format string, t time.Time, offset *time.Location, tmp []byte) (b []byte) {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
if offset != nil {
t = t.In(offset)
}
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b = t.AppendFormat(tmp, format)
if bc {
b = append(b, " BC"...)
}
return b
}
// timeToPgBinary calculates the Postgres binary format for a timestamp. The timestamp
// is represented as the number of microseconds between the given time and Jan 1, 2000
// (dubbed the PGEpochJDate), stored within an int64.
func timeToPgBinary(t time.Time, offset *time.Location) int64 {
if offset != nil {
t = t.In(offset)
} else {
t = t.UTC()
}
return duration.DiffMicros(t, pgwirebase.PGEpochJDate)
} | )
// pgType contains type metadata used in RowDescription messages.
type pgType struct { | random_line_split |
types.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package pgwire
import (
"context"
"encoding/binary"
"fmt"
"math"
"math/big"
"net"
"strconv"
"strings"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/ruiaylin/pgparser/telemetry"
"github.com/ruiaylin/pgparser/lex"
"github.com/ruiaylin/pgparser/pgwire/pgwirebase"
"github.com/ruiaylin/pgparser/ast"
"github.com/ruiaylin/pgparser/sessiondata"
"github.com/ruiaylin/pgparser/sqltelemetry"
"github.com/ruiaylin/pgparser/types"
"github.com/ruiaylin/pgparser/utils/duration"
"github.com/ruiaylin/pgparser/utils/errorutil/unimplemented"
"github.com/ruiaylin/pgparser/utils/ipaddr"
"github.com/ruiaylin/pgparser/utils/log"
"github.com/ruiaylin/pgparser/utils/timeofday"
"github.com/ruiaylin/pgparser/utils/timetz"
"github.com/cockroachdb/errors"
"github.com/lib/pq/oid"
)
// pgType contains type metadata used in RowDescription messages.
type pgType struct {
oid oid.Oid
// Variable-size types have size=-1.
// Note that the protocol has both int16 and int32 size fields,
// so this attribute is an unsized int and should be cast
// as needed.
// This field does *not* correspond to the encoded length of a
// data type, so it's unclear what, if anything, it is used for.
// To get the right value, "SELECT oid, typlen FROM pg_type"
// on a postgres server.
size int
}
func pgTypeForParserType(t *types.T) pgType {
size := -1
if s, variable := ast.DatumTypeSize(t); !variable {
size = int(s)
}
return pgType{
oid: t.Oid(),
size: size,
}
}
// resolveBlankPaddedChar pads the given string with spaces if blank padding is
// required or returns the string unmodified otherwise.
func resolveBlankPaddedChar(s string, t *types.T) string {
if t.Oid() == oid.T_bpchar {
// Pad spaces on the right of the string to make it of length specified in
// the type t.
return fmt.Sprintf("%-*v", t.Width(), s)
}
return s
}
// writeTextDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings and therefore need padding (chars).
// It is ignored (and can be nil) for types which do not need padding.
func (b *writeBuffer) writeTextDatum(
ctx context.Context, d ast.Datum, conv sessiondata.DataConversionConfig, t *types.T,
) {
if log.V(2) {
log.Infof(ctx, "pgwire writing TEXT datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DBool:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DInt:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendInt(b.putbuf[4:4], int64(*v), 10)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DFloat:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := strconv.AppendFloat(b.putbuf[4:4], float64(*v), 'g', conv.GetFloatPrec(), 64)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DDecimal:
b.writeLengthPrefixedDatum(v)
case *ast.DBytes:
result := lex.EncodeByteArrayToRawBytes(
string(*v), conv.BytesEncodeFormat, false /* skipHexPrefix */)
b.putInt32(int32(len(result)))
b.write([]byte(result))
case *ast.DUuid:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := b.putbuf[4 : 4+36]
v.UUID.StringBytes(s)
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DIPAddr:
b.writeLengthPrefixedString(v.IPAddr.String())
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DDate:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DTime:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTime(timeofday.TimeOfDay(*v), b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimeTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTimeTZ(v.TimeTZ, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DGeography:
s := v.Geography.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DGeometry:
s := v.Geometry.EWKBHex()
b.putInt32(int32(len(s)))
b.write([]byte(s))
case *ast.DTimestamp:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, nil, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DTimestampTZ:
// Start at offset 4 because `putInt32` clobbers the first 4 bytes.
s := formatTs(v.Time, conv.Location, b.putbuf[4:4])
b.putInt32(int32(len(s)))
b.write(s)
case *ast.DInterval:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DJSON:
b.writeLengthPrefixedString(v.JSON.String())
case *ast.DTuple:
b.textFormatter.FormatNode(v)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DArray:
// Arrays have custom formatting depending on their OID.
b.textFormatter.FormatNode(d)
b.writeFromFmtCtx(b.textFormatter)
case *ast.DOid:
b.writeLengthPrefixedDatum(v)
case *ast.DEnum:
// Enums are serialized with their logical representation.
b.writeLengthPrefixedString(v.LogicalRep)
default:
b.setError(errors.Errorf("unsupported type %T", d))
}
}
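// Editor-added note (assumption: the session uses the default hex bytes encode
// format): a DBytes value holding "hello" would be sent in the text format as
// the 12-character string \x68656c6c6f, i.e. a "\x" prefix plus two hex digits
// per byte.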
// writeBinaryDatum writes d to the buffer. Type t must be specified for types
// that have various width encodings (floats, ints, chars). It is ignored
// (and can be nil) for types with a 1:1 datum:type mapping.
func (b *writeBuffer) writeBinaryDatum(
ctx context.Context, d ast.Datum, sessionLoc *time.Location, t *types.T,
) {
if log.V(2) {
log.Infof(ctx, "pgwire writing BINARY datum of type: %T, %#v", d, d)
}
if d == ast.DNull {
// NULL is encoded as -1; all other values have a length prefix.
b.putInt32(-1)
return
}
switch v := ast.UnwrapDatum(nil, d).(type) {
case *ast.DBitArray:
words, lastBitsUsed := v.EncodingParts()
if len(words) == 0 {
b.putInt32(4)
} else {
// Encode the length of the output bytes. It is computed here so we don't
// have to keep a buffer.
// 4: the int32 of the bitLen.
// 8*(len(words)-1): number of 8-byte words except the last one since it's
// partial.
// (lastBitsUsed+7)/8: number of bytes that will be written in the last
// partial word. The /8 rounds down, such that the +7 will cause 1-or-more
// bits to use a byte, but 0 will not.
b.putInt32(4 + int32(8*(len(words)-1)) + int32((lastBitsUsed+7)/8))
}
bitLen := v.BitLen()
b.putInt32(int32(bitLen))
var byteBuf [8]byte
for i := 0; i < len(words)-1; i++ {
w := words[i]
binary.BigEndian.PutUint64(byteBuf[:], w)
b.write(byteBuf[:])
}
if len(words) > 0 {
w := words[len(words)-1]
for i := uint(0); i < uint(lastBitsUsed); i += 8 {
c := byte(w >> (56 - i))
b.writeByte(c)
}
}
case *ast.DBool:
b.putInt32(1)
if *v {
b.writeByte(1)
} else {
b.writeByte(0)
}
case *ast.DInt:
switch t.Oid() {
case oid.T_int2:
b.putInt32(2)
b.putInt16(int16(*v))
case oid.T_int4:
b.putInt32(4)
b.putInt32(int32(*v))
case oid.T_int8:
b.putInt32(8)
b.putInt64(int64(*v))
default:
b.setError(errors.Errorf("unsupported int oid: %v", t.Oid()))
}
case *ast.DFloat:
switch t.Oid() {
case oid.T_float4:
b.putInt32(4)
b.putInt32(int32(math.Float32bits(float32(*v))))
case oid.T_float8:
b.putInt32(8)
b.putInt64(int64(math.Float64bits(float64(*v))))
default:
b.setError(errors.Errorf("unsupported float oid: %v", t.Oid()))
}
case *ast.DDecimal:
if v.Form != apd.Finite {
b.putInt32(8)
// 0 digits.
b.putInt32(0)
// https://github.com/postgres/postgres/blob/ffa4cbd623dd69f9fa99e5e92426928a5782cf1a/src/backend/utils/adt/numeric.c#L169
b.write([]byte{0xc0, 0, 0, 0})
if v.Form == apd.Infinite {
// TODO(mjibson): #32489
// The above encoding is not correct for Infinity, but since that encoding
// doesn't exist in postgres, it's unclear what to do. For now use the NaN
// encoding and count it to see if anyone even needs this.
telemetry.Inc(sqltelemetry.BinaryDecimalInfinityCounter)
}
return
}
alloc := struct {
pgNum pgwirebase.PGNumeric
bigI big.Int
}{
pgNum: pgwirebase.PGNumeric{
// Since we use 2000 as the exponent limits in ast.DecimalCtx, this
// conversion should not overflow.
Dscale: int16(-v.Exponent),
},
}
if v.Sign() >= 0 {
alloc.pgNum.Sign = pgwirebase.PGNumericPos
} else {
alloc.pgNum.Sign = pgwirebase.PGNumericNeg
}
isZero := func(r rune) bool {
return r == '0'
}
// Mostly cribbed from libpqtypes' str2num.
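// For example, 1234.567 has dscale 3, weight 0 and two base-10000 digits: 1234 and 5670.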
digits := strings.TrimLeftFunc(alloc.bigI.Abs(&v.Coeff).String(), isZero)
dweight := len(digits) - int(alloc.pgNum.Dscale) - 1
digits = strings.TrimRightFunc(digits, isZero)
if dweight >= 0 {
alloc.pgNum.Weight = int16((dweight+1+pgwirebase.PGDecDigits-1)/pgwirebase.PGDecDigits - 1)
} else {
alloc.pgNum.Weight = int16(-((-dweight-1)/pgwirebase.PGDecDigits + 1))
}
offset := (int(alloc.pgNum.Weight)+1)*pgwirebase.PGDecDigits - (dweight + 1)
alloc.pgNum.Ndigits = int16((len(digits) + offset + pgwirebase.PGDecDigits - 1) / pgwirebase.PGDecDigits)
if len(digits) == 0 {
offset = 0
alloc.pgNum.Ndigits = 0
alloc.pgNum.Weight = 0
}
digitIdx := -offset
nextDigit := func() int16 {
var ndigit int16
for nextDigitIdx := digitIdx + pgwirebase.PGDecDigits; digitIdx < nextDigitIdx; digitIdx++ {
ndigit *= 10
if digitIdx >= 0 && digitIdx < len(digits) {
ndigit += int16(digits[digitIdx] - '0')
}
}
return ndigit
}
b.putInt32(int32(2 * (4 + alloc.pgNum.Ndigits)))
b.putInt16(alloc.pgNum.Ndigits)
b.putInt16(alloc.pgNum.Weight)
b.putInt16(int16(alloc.pgNum.Sign))
b.putInt16(alloc.pgNum.Dscale)
for digitIdx < len(digits) {
b.putInt16(nextDigit())
}
case *ast.DBytes:
b.putInt32(int32(len(*v)))
b.write([]byte(*v))
case *ast.DUuid:
b.putInt32(16)
b.write(v.GetBytes())
case *ast.DIPAddr:
// We calculate the Postgres binary format for an IPAddr. For the spec, see
// https://github.com/postgres/postgres/blob/81c5e46c490e2426db243eada186995da5bb0ba7/src/backend/utils/adt/network.c#L144
// The pgBinary encoding is as follows:
// The int32 length of the following bytes.
// The family byte.
// The mask size byte.
// A 0 byte for is_cidr. It's ignored on the postgres frontend.
// The length of our IP bytes.
// The IP bytes.
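// For example, 127.0.0.1/32 is sent as a length of 8 followed by the family
// byte, mask 32, a 0 is_cidr byte, addr length 4, and the four address bytes.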
const pgIPAddrBinaryHeaderSize = 4
if v.Family == ipaddr.IPv4family {
b.putInt32(net.IPv4len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv4family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv4len))
err := v.Addr.WriteIPv4Bytes(b)
if err != nil {
b.setError(err)
}
} else if v.Family == ipaddr.IPv6family {
b.putInt32(net.IPv6len + pgIPAddrBinaryHeaderSize)
b.writeByte(pgwirebase.PGBinaryIPv6family)
b.writeByte(v.Mask)
b.writeByte(0)
b.writeByte(byte(net.IPv6len))
err := v.Addr.WriteIPv6Bytes(b)
if err != nil {
b.setError(err)
}
} else {
b.setError(errors.Errorf("error encoding inet to pgBinary: %v", v.IPAddr))
}
case *ast.DEnum:
b.writeLengthPrefixedString(v.LogicalRep)
case *ast.DString:
b.writeLengthPrefixedString(resolveBlankPaddedChar(string(*v), t))
case *ast.DCollatedString:
b.writeLengthPrefixedString(v.Contents)
case *ast.DTimestamp:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, nil))
case *ast.DTimestampTZ:
b.putInt32(8)
b.putInt64(timeToPgBinary(v.Time, sessionLoc))
case *ast.DDate:
b.putInt32(4)
b.putInt32(v.PGEpochDays())
case *ast.DTime:
b.putInt32(8)
b.putInt64(int64(*v))
case *ast.DTimeTZ:
b.putInt32(12)
b.putInt64(int64(v.TimeOfDay))
b.putInt32(v.OffsetSecs)
case *ast.DInterval:
b.putInt32(16)
b.putInt64(v.Nanos() / int64(time.Microsecond/time.Nanosecond))
b.putInt32(int32(v.Days))
b.putInt32(int32(v.Months))
case *ast.DTuple:
// TODO(andrei): We shouldn't be allocating a new buffer for every tuple.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of datums.
subWriter.putInt32(int32(len(v.D)))
for _, elem := range v.D {
oid := elem.ResolvedType().Oid()
subWriter.putInt32(int32(oid))
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, elem.ResolvedType())
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DGeography:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DGeometry:
b.putInt32(int32(len(v.EWKB())))
b.write(v.EWKB())
case *ast.DArray:
if v.ParamTyp.Family() == types.ArrayFamily {
b.setError(unimplemented.NewWithIssueDetail(32552,
"binenc", "unsupported binary serialization of multidimensional arrays"))
return
}
// TODO(andrei): We shouldn't be allocating a new buffer for every array.
subWriter := newWriteBuffer(nil /* bytecount */)
// Put the number of dimensions. We currently support 1d arrays only.
var ndims int32 = 1
if v.Len() == 0 {
ndims = 0
}
subWriter.putInt32(ndims)
hasNulls := 0
if v.HasNulls {
hasNulls = 1
}
oid := v.ParamTyp.Oid()
subWriter.putInt32(int32(hasNulls))
subWriter.putInt32(int32(oid))
if v.Len() > 0 {
subWriter.putInt32(int32(v.Len()))
// Lower bound, we only support a lower bound of 1.
subWriter.putInt32(1)
for _, elem := range v.Array {
subWriter.writeBinaryDatum(ctx, elem, sessionLoc, v.ParamTyp)
}
}
b.writeLengthPrefixedBuffer(&subWriter.wrapped)
case *ast.DJSON:
s := v.JSON.String()
b.putInt32(int32(len(s) + 1))
// JSONB version number; as of this writing, `1` is the only valid value.
b.writeByte(1)
b.writeString(s)
case *ast.DOid:
b.putInt32(4)
b.putInt32(int32(v.DInt))
default:
b.setError(errors.AssertionFailedf("unsupported type %T", d))
}
}
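// Format strings used to render dates, times and timestamps the way lib/pq expects them.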
const (
pgTimeFormat = "15:04:05.999999"
pgTimeTZFormat = pgTimeFormat + "-07:00"
pgDateFormat = "2006-01-02"
pgTimeStampFormatNoOffset = pgDateFormat + " " + pgTimeFormat
pgTimeStampFormat = pgTimeStampFormatNoOffset + "-07:00"
pgTime2400Format = "24:00:00"
)
// formatTime formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
func formatTime(t timeofday.TimeOfDay, tmp []byte) []byte {
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t == timeofday.Time2400 {
return []byte(pgTime2400Format)
}
return t.ToTime().AppendFormat(tmp, pgTimeFormat)
}
// formatTimeTZ formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. The function will then return
// the resulting buffer.
// Note it does not understand the "second" component of the offset as lib/pq
// cannot parse it.
func | (t timetz.TimeTZ, tmp []byte) []byte {
ret := t.ToTime().AppendFormat(tmp, pgTimeTZFormat)
// time.Time's AppendFormat does not recognize 2400, so special case it accordingly.
if t.TimeOfDay == timeofday.Time2400 {
// It instead reads 00:00:00. Replace that text.
var newRet []byte
newRet = append(newRet, pgTime2400Format...)
newRet = append(newRet, ret[len(pgTime2400Format):]...)
ret = newRet
}
return ret
}
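// formatTs formats t into a format lib/pq understands, appending to the
// provided tmp buffer and reallocating if needed. When offset is non-nil the
// time is converted to that location and the timezone offset is included in
// the output.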
func formatTs(t time.Time, offset *time.Location, tmp []byte) (b []byte) {
var format string
if offset != nil {
format = pgTimeStampFormat
} else {
format = pgTimeStampFormatNoOffset
}
return formatTsWithFormat(format, t, offset, tmp)
}
// formatTsWithFormat formats t with an optional offset into a format
// lib/pq understands, appending to the provided tmp buffer and
// reallocating if needed. The function will then return the resulting
// buffer. formatTsWithFormat is mostly cribbed from github.com/lib/pq.
func formatTsWithFormat(format string, t time.Time, offset *time.Location, tmp []byte) (b []byte) {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware: "0000" in ISO is "1 BC", "-0001" is "2 BC", and so on.
if offset != nil {
t = t.In(offset)
}
bc := false
if t.Year() <= 0 {
// Flip the year sign and add 1, e.g.: "0" becomes "1" and "-10" becomes "11".
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b = t.AppendFormat(tmp, format)
if bc {
b = append(b, " BC"...)
}
return b
}
// timeToPgBinary calculates the Postgres binary format for a timestamp. The timestamp
// is represented as the number of microseconds between the given time and Jan 1, 2000
// (dubbed the PGEpochJDate), stored within an int64.
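// For example, 2000-01-01 00:00:01 UTC maps to 1000000 (one second in microseconds).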
func timeToPgBinary(t time.Time, offset *time.Location) int64 {
if offset != nil {
t = t.In(offset)
} else {
t = t.UTC()
}
return duration.DiffMicros(t, pgwirebase.PGEpochJDate)
}
| formatTimeTZ | identifier_name |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
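// Directory names for the wallet database and stored transactions/proofs, plus
// the single-byte key prefixes used in the LMDB store.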
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
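/// Derive the blind and nonce XOR masks for a slate from the wallet's root
/// key, so private transaction context secrets are never stored in the clear.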
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
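/// LMDB-backed wallet backend holding the store handle, wallet password,
/// keychain, active parent key id, configuration and node client.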
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`.
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
}
self.db = Some(store);
Ok(())
}
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
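/// Fetch a single output by key id and optional MMR index.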
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
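/// Load the private transaction context for a slate participant and undo the
/// XOR masking applied on save.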
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
}
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
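/// Derive the next child key identifier under the current parent path and
/// persist the incremented derivation index.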
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?;
Ok(())
}
fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
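/// Return the next transaction log id for the given parent key, incrementing
/// the stored counter.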
fn | (&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
}
| next_tx_log_id | identifier_name |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`.
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
}
self.db = Some(store);
Ok(())
}
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
}
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?; | fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn next_tx_log_id(&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
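/// Record the last confirmed chain height for the active parent key.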
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
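/// Persist a transaction log entry keyed by its parent key id and entry id.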
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
} | Ok(())
}
| random_line_split |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`.
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? |
self.db = Some(store);
Ok(())
}
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
}
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?;
Ok(())
}
fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn next_tx_log_id(&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
}
| {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
} | conditional_block |
lmdb_backend.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::types::{
AcctPathMapping, ChildNumber, Context, Identifier, NodeClient, OutputData, Result, Transaction,
TxLogEntry, TxProof, WalletBackend, WalletBackendBatch, WalletSeed,
};
use crate::common::config::WalletConfig;
use crate::common::{ErrorKind, Keychain};
use crate::internal::restore;
use blake2_rfc::blake2b::Blake2b;
use chrono::Utc;
use failure::ResultExt;
use grin_core::{global, ser};
use grin_keychain::SwitchCommitmentType;
use grin_store::Store;
use grin_store::{self, option_to_not_found, to_key, to_key_u64};
use grin_util::secp::constants::SECRET_KEY_SIZE;
use grin_util::{from_hex, to_hex, ZeroingString};
use std::cell::RefCell;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::ops::Deref;
use std::path::Path;
pub const DB_DIR: &'static str = "db";
pub const TX_SAVE_DIR: &'static str = "saved_txs";
pub const TX_PROOF_SAVE_DIR: &'static str = "saved_proofs";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
const PRIVATE_TX_CONTEXT_PREFIX: u8 = 'p' as u8;
const TX_LOG_ENTRY_PREFIX: u8 = 't' as u8;
const TX_LOG_ID_PREFIX: u8 = 'i' as u8;
const ACCOUNT_PATH_MAPPING_PREFIX: u8 = 'a' as u8;
fn private_ctx_xor_keys<K>(
keychain: &K,
slate_id: &[u8],
) -> Result<([u8; SECRET_KEY_SIZE], [u8; SECRET_KEY_SIZE])>
where
K: Keychain,
{
let root_key = keychain.derive_key(0, &K::root_key_id(), &SwitchCommitmentType::None)?;
// derive XOR values for storing secret values in DB
// h(root_key|slate_id|"blind")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"blind".as_bytes()[..]);
let blind_xor_key = hasher.finalize();
let mut ret_blind = [0; SECRET_KEY_SIZE];
ret_blind.copy_from_slice(&blind_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
// h(root_key|slate_id|"nonce")
let mut hasher = Blake2b::new(SECRET_KEY_SIZE);
hasher.update(&root_key.0[..]);
hasher.update(&slate_id[..]);
hasher.update(&"nonce".as_bytes()[..]);
let nonce_xor_key = hasher.finalize();
let mut ret_nonce = [0; SECRET_KEY_SIZE];
ret_nonce.copy_from_slice(&nonce_xor_key.as_bytes()[0..SECRET_KEY_SIZE]);
Ok((ret_blind, ret_nonce))
}
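// Note: the masking applied in `save_private_context` and `get_private_context` is a plain XOR
// with the keys derived above, and XOR is its own inverse, so the same keys both hide and recover
// the secret bytes. Illustrative sketch, assuming `secret` and `mask` are equal-length byte slices:
//
//     let masked: Vec<u8> = secret.iter().zip(mask.iter()).map(|(b, k)| b ^ k).collect();
//     let recovered: Vec<u8> = masked.iter().zip(mask.iter()).map(|(b, k)| b ^ k).collect();
//     assert_eq!(recovered.as_slice(), secret);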
pub struct Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
db: Option<Store>,
password: Option<ZeroingString>,
pub keychain: Option<K>,
parent_key_id: Identifier,
config: WalletConfig,
w2n_client: C,
}
impl<C, K> Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
fn db(&self) -> Result<&Store> {
self.db.as_ref().ok_or(ErrorKind::NoWallet.into())
}
/// Create `Backend` instance
pub fn new(config: &WalletConfig, client: C) -> Result<Self> {
Ok(Self {
db: None,
password: None,
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: client,
})
}
/*pub fn new(config: &WalletConfig, password: &str, n_client: C) -> Result<Self> {
let res = Backend {
db: None,
password: Some(ZeroingString::from(password)),
keychain: None,
parent_key_id: K::derive_key_id(2, 0, 0, 0, 0),
config: config.clone(),
w2n_client: n_client,
};
Ok(res)
}*/
}
impl<C, K> WalletBackend<C, K> for Backend<C, K>
where
C: NodeClient,
K: Keychain,
{
/// Check whether the backend has a seed or not
fn has_seed(&self) -> Result<bool> {
Ok(WalletSeed::seed_file_exists(&self.config).is_err())
}
/// Get the seed
fn get_seed(&self) -> Result<ZeroingString> {
match &self.password {
Some(p) => {
let seed = WalletSeed::from_file(&self.config, p)?;
seed.to_mnemonic().map(|s| s.into())
}
None => Err(ErrorKind::NoWallet.into()),
}
}
/// Set a new seed, encrypt with `password`
/// Should fail if backend already has a seed,
/// unless `overwrite` is set to `true`
fn set_seed(
&mut self,
mnemonic: Option<ZeroingString>,
password: ZeroingString,
overwrite: bool,
) -> Result<()> {
if self.has_seed()? && !overwrite {
return Err(ErrorKind::WalletHasSeed.into());
}
self.password = Some(password.clone());
let _ = WalletSeed::init_file(&self.config, 24, mnemonic, &password, overwrite)?;
Ok(())
}
/// Check if the backend connection is established
fn connected(&self) -> Result<bool> {
Ok(self.db.is_some())
}
/// Connect to the backend
fn connect(&mut self) -> Result<()> {
if !self.has_seed()? {
return Err(ErrorKind::WalletNoSeed.into());
}
if self.connected()? {
return Err(ErrorKind::WalletConnected.into());
}
let root_path = Path::new(&self.config.data_file_dir);
let db_path = root_path.join(DB_DIR);
fs::create_dir_all(&db_path)?;
let stored_tx_path = root_path.join(TX_SAVE_DIR);
fs::create_dir_all(&stored_tx_path)?;
let stored_tx_proof_path = root_path.join(TX_PROOF_SAVE_DIR);
fs::create_dir_all(&stored_tx_proof_path)?;
let store = Store::new(db_path.to_str().unwrap(), None, Some(DB_DIR), None)?;
let default_account = AcctPathMapping {
label: "default".to_string(),
path: K::derive_key_id(2, 0, 0, 0, 0),
};
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut default_account.label.as_bytes().to_vec(),
);
if !store.exists(&acct_key)? {
let batch = store.batch()?;
batch.put_ser(&acct_key, &default_account)?;
batch.commit()?;
}
self.db = Some(store);
Ok(())
}
/// Disconnect from backend
fn disconnect(&mut self) -> Result<()> {
self.db = None;
Ok(())
}
/// Set password
fn set_password(&mut self, password: ZeroingString) -> Result<()> {
let _ = WalletSeed::from_file(&self.config, password.deref())?;
self.password = Some(password);
Ok(())
}
/// Clear out backend
fn clear(&mut self) -> Result<()> {
self.disconnect()?;
let root_path = Path::new(&self.config.data_file_dir);
if !root_path.exists() {
return Ok(());
}
let backup_dir = Utc::now().format("%Y%m%d-%H%M%S").to_string();
let backup_path = root_path.join("backups").join(backup_dir);
fs::create_dir_all(&backup_path)?;
let db_path = root_path.join(DB_DIR);
if db_path.exists() {
fs::rename(&db_path, &backup_path.join(DB_DIR))?;
}
let txs_path = root_path.join(TX_SAVE_DIR);
if txs_path.exists() {
fs::rename(&txs_path, &backup_path.join(TX_SAVE_DIR))?;
}
let proofs_path = root_path.join(TX_PROOF_SAVE_DIR);
if proofs_path.exists() {
fs::rename(&proofs_path, &backup_path.join(TX_PROOF_SAVE_DIR))?;
}
self.connect()?;
Ok(())
}
/// Initialise with whatever stored credentials we have
fn open_with_credentials(&mut self) -> Result<()> {
let wallet_seed = WalletSeed::from_file(
&self.config,
&self.password.clone().ok_or(ErrorKind::OpenWalletError)?,
)
.map_err(|_| ErrorKind::OpenWalletError)?;
self.keychain = Some(
wallet_seed
.derive_keychain(global::is_floonet())
.map_err(|_| ErrorKind::DeriveKeychainError)?,
);
Ok(())
}
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<()> {
self.keychain = None;
Ok(())
}
/// Return the keychain being used
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
/// Return the node client being used
fn w2n_client(&mut self) -> &mut C {
&mut self.w2n_client
}
/// Set parent path by account name
fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<()> {
let label = label.to_owned();
let res = self.accounts()?.find(|l| l.label == label);
if let Some(a) = res {
self.set_parent_key_id(&a.path);
Ok(())
} else {
return Err(ErrorKind::UnknownAccountLabel(label.clone()).into());
}
}
/// Set parent path
fn set_parent_key_id(&mut self, id: &Identifier) {
self.parent_key_id = id.clone();
}
fn get_parent_key_id(&self) -> Identifier {
self.parent_key_id.clone()
}
fn get_output(&self, id: &Identifier, mmr_index: &Option<u64>) -> Result<OutputData> {
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
option_to_not_found(self.db()?.get_ser(&key), || format!("Key Id: {}", id))
.map_err(|e| e.into())
}
fn outputs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = OutputData> + 'a>> {
Ok(Box::new(
self.db()?.iter(&[OUTPUT_PREFIX]).unwrap().map(|x| x.1),
))
}
fn get_tx_log_by_slate_id(&self, slate_id: &str) -> Result<Option<TxLogEntry>> {
let key = to_key(TX_LOG_ENTRY_PREFIX, &mut slate_id.as_bytes().to_vec());
self.db()?.get_ser(&key).map_err(|e| e.into())
}
fn tx_logs<'a>(&'a self) -> Result<Box<dyn Iterator<Item = TxLogEntry> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[TX_LOG_ENTRY_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<Context> |
fn accounts<'a>(&'a self) -> Result<Box<dyn Iterator<Item = AcctPathMapping> + 'a>> {
Ok(Box::new(
self.db()?
.iter(&[ACCOUNT_PATH_MAPPING_PREFIX])
.unwrap()
.map(|x| x.1),
))
}
fn get_acct_path(&self, label: &str) -> Result<Option<AcctPathMapping>> {
let acct_key = to_key(ACCOUNT_PATH_MAPPING_PREFIX, &mut label.as_bytes().to_vec());
let ser = self.db()?.get_ser(&acct_key)?;
Ok(ser)
}
fn get_stored_tx(&self, uuid: &str) -> Result<Option<Transaction>> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
if !path.exists() {
return Ok(None);
}
let tx_file = Path::new(&path).to_path_buf();
let mut tx_f = File::open(tx_file)?;
let mut content = String::new();
tx_f.read_to_string(&mut content)?;
let tx_bin = from_hex(content).unwrap();
Ok(Some(
ser::deserialize::<Transaction>(&mut &tx_bin[..], ser::ProtocolVersion(1)).unwrap(),
))
}
fn has_stored_tx_proof(&self, uuid: &str) -> Result<bool> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
Ok(tx_proof_file.exists())
}
fn get_stored_tx_proof(&self, uuid: &str) -> Result<Option<TxProof>> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let tx_proof_file = Path::new(&path).to_path_buf();
if !tx_proof_file.exists() {
return Ok(None);
}
let mut tx_proof_f = File::open(tx_proof_file)?;
let mut content = String::new();
tx_proof_f.read_to_string(&mut content)?;
Ok(Some(serde_json::from_str(&content)?))
}
fn batch<'a>(&'a self) -> Result<Box<dyn WalletBackendBatch<K> + 'a>> {
Ok(Box::new(Batch {
_store: self,
db: RefCell::new(Some(self.db()?.batch()?)),
keychain: self.keychain.clone(),
}))
}
fn next_child<'a>(&mut self) -> Result<Identifier> {
let mut deriv_idx = {
let batch = self.db()?.batch()?;
let deriv_key = to_key(DERIV_PREFIX, &mut self.parent_key_id.to_bytes().to_vec());
match batch.get_ser(&deriv_key)? {
Some(idx) => idx,
None => 0,
}
};
let mut return_path = self.parent_key_id.to_path();
return_path.depth = return_path.depth + 1;
return_path.path[return_path.depth as usize - 1] = ChildNumber::from(deriv_idx);
deriv_idx = deriv_idx + 1;
let mut batch = self.batch()?;
batch.save_child_index(&self.parent_key_id, deriv_idx)?;
batch.commit()?;
Ok(Identifier::from_path(&return_path))
}
fn get_last_confirmed_height<'a>(&self) -> Result<u64> {
let batch = self.db()?.batch()?;
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self.parent_key_id.to_bytes().to_vec(),
);
let last_confirmed_height = match batch.get_ser(&height_key)? {
Some(h) => h,
None => 0,
};
Ok(last_confirmed_height)
}
fn restore(&mut self) -> Result<()> {
restore::restore(self).context(ErrorKind::Restore)?;
Ok(())
}
fn check_repair(&mut self, delete_unconfirmed: bool) -> Result<()> {
restore::check_repair(self, delete_unconfirmed).context(ErrorKind::Restore)?;
Ok(())
}
fn calc_commit_for_cache(&mut self, amount: u64, id: &Identifier) -> Result<Option<String>> {
if self.config.no_commit_cache == Some(true) {
Ok(None)
} else {
Ok(Some(grin_util::to_hex(
self.keychain()
.commit(amount, id, &SwitchCommitmentType::Regular)?
.0
.to_vec(),
)))
}
}
}
/// An atomic batch in which all changes can be committed all at once or
/// discarded on error.
pub struct Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
_store: &'a Backend<C, K>,
db: RefCell<Option<grin_store::Batch<'a>>>,
/// Keychain
keychain: Option<K>,
}
#[allow(missing_docs)]
impl<'a, C, K> WalletBackendBatch<K> for Batch<'a, C, K>
where
C: NodeClient,
K: Keychain,
{
fn keychain(&mut self) -> &mut K {
self.keychain.as_mut().unwrap()
}
fn save_output(&mut self, out: &OutputData) -> Result<()> {
// Save the output data to the db.
{
let key = match out.mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec(), i),
None => to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec()),
};
self.db.borrow().as_ref().unwrap().put_ser(&key, &out)?;
}
Ok(())
}
fn delete_output(&mut self, id: &Identifier, mmr_index: &Option<u64>) -> Result<()> {
// Delete the output data.
{
let key = match mmr_index {
Some(i) => to_key_u64(OUTPUT_PREFIX, &mut id.to_bytes().to_vec(), *i),
None => to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec()),
};
let _ = self.db.borrow().as_ref().unwrap().delete(&key);
}
Ok(())
}
fn store_tx(&self, uuid: &str, tx: &Transaction) -> Result<()> {
let filename = format!("{}.grintx", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let tx_hex = to_hex(ser::ser_vec(tx, ser::ProtocolVersion(1)).unwrap());
stored_tx.write_all(&tx_hex.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn store_tx_proof(&self, uuid: &str, tx_proof: &TxProof) -> Result<()> {
let filename = format!("{}.proof", uuid);
let path = Path::new(&self._store.config.data_file_dir)
.join(TX_PROOF_SAVE_DIR)
.join(filename);
let path_buf = Path::new(&path).to_path_buf();
let mut stored_tx = File::create(path_buf)?;
let proof_ser = serde_json::to_string(tx_proof)?;
stored_tx.write_all(&proof_ser.as_bytes())?;
stored_tx.sync_all()?;
Ok(())
}
fn next_tx_log_id(&mut self, parent_key_id: &Identifier) -> Result<u32> {
let tx_id_key = to_key(TX_LOG_ID_PREFIX, &mut parent_key_id.to_bytes().to_vec());
let last_tx_log_id = match self.db.borrow().as_ref().unwrap().get_ser(&tx_id_key)? {
Some(t) => t,
None => 0,
};
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_id_key, &(last_tx_log_id + 1))?;
Ok(last_tx_log_id)
}
fn save_last_confirmed_height(&mut self, height: u64) -> Result<()> {
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut self._store.get_parent_key_id().to_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&height_key, &height)?;
Ok(())
}
fn save_child_index(&mut self, parent_key_id: &Identifier, index: u32) -> Result<()> {
let deriv_key = to_key(DERIV_PREFIX, &mut parent_key_id.to_bytes().to_vec());
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&deriv_key, &index)?;
Ok(())
}
fn save_tx_log_entry(&mut self, t: &TxLogEntry) -> Result<()> {
let tx_log_key = to_key_u64(
TX_LOG_ENTRY_PREFIX,
&mut t.parent_key_id.to_bytes().to_vec(),
t.id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&tx_log_key, &t)?;
Ok(())
}
fn save_acct_path(&mut self, mapping: &AcctPathMapping) -> Result<()> {
let acct_key = to_key(
ACCOUNT_PATH_MAPPING_PREFIX,
&mut mapping.label.as_bytes().to_vec(),
);
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&acct_key, &mapping)?;
Ok(())
}
fn lock_output(&mut self, out: &mut OutputData) -> Result<()> {
out.lock();
self.save_output(out)
}
fn save_private_context(
&mut self,
slate_id: &[u8],
participant_id: usize,
ctx: &Context,
) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut s_ctx = ctx.clone();
for i in 0..SECRET_KEY_SIZE {
s_ctx.sec_key.0[i] = s_ctx.sec_key.0[i] ^ blind_xor_key[i];
s_ctx.sec_nonce.0[i] = s_ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
self.db
.borrow()
.as_ref()
.unwrap()
.put_ser(&ctx_key, &s_ctx)?;
Ok(())
}
fn delete_private_context(&mut self, slate_id: &[u8], participant_id: usize) -> Result<()> {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
self.db
.borrow()
.as_ref()
.unwrap()
.delete(&ctx_key)
.map_err(|e| e.into())
}
fn commit(&mut self) -> Result<()> {
let db = self.db.replace(None);
db.unwrap().commit()?;
Ok(())
}
}
| {
let ctx_key = to_key_u64(
PRIVATE_TX_CONTEXT_PREFIX,
&mut slate_id.to_vec(),
participant_id as u64,
);
let (blind_xor_key, nonce_xor_key) = private_ctx_xor_keys(self.keychain(), slate_id)?;
let mut ctx: Context = option_to_not_found(self.db()?.get_ser(&ctx_key), || {
format!("Slate id: {:x?}", slate_id.to_vec())
})?;
for i in 0..SECRET_KEY_SIZE {
ctx.sec_key.0[i] = ctx.sec_key.0[i] ^ blind_xor_key[i];
ctx.sec_nonce.0[i] = ctx.sec_nonce.0[i] ^ nonce_xor_key[i];
}
Ok(ctx)
} | identifier_body |
lib.rs | //! Welcome to CCP.
//!
//! This crate, portus, implements a CCP. This includes:
//! 1. An interface definition for external types wishing to implement congestion control
//! algorithms (`CongAlg`).
//! 2. A [compiler](lang/index.html) for datapath programs.
//! 3. An IPC and serialization [layer](ipc/index.html) for communicating with libccp-compliant datapaths.
//!
//! The entry points into portus are [`run`](./fn.run.html) and [`spawn`](./fn.spawn.html), which start
//! the CCP algorithm runtime. There is also the convenience macro [`start`](./macro.start.html).
//!
//! The runtime listens for datapath messages and dispatches calls to
//! the appropriate congestion control methods.
//!
//! Example
//! =======
//!
//! The following congestion control algorithm sets the congestion window to `42`, and prints the
//! minimum RTT observed over 42 millisecond intervals.
//!
//! ```
//! extern crate fnv;
//! extern crate portus;
//! use std::collections::HashMap;
//! use portus::{CongAlg, Flow, Config, Datapath, DatapathInfo, DatapathTrait, Report};
//! use portus::ipc::Ipc;
//! use portus::lang::Scope;
//! use portus::lang::Bin;
//!
//! #[derive(Clone, Default)]
//! struct MyCongestionControlAlgorithm(Scope);
//!
//! impl<I: Ipc> CongAlg<I> for MyCongestionControlAlgorithm {
//! type Flow = Self;
//!
//! fn name() -> &'static str {
//! "My congestion control algorithm"
//! }
//! fn datapath_programs(&self) -> HashMap<&'static str, String> {
//! let mut h = HashMap::default();
//! h.insert(
//! "MyProgram", "
//! (def (Report
//! (volatile minrtt +infinity)
//! ))
//! (when true
//! (:= Report.minrtt (min Report.minrtt Flow.rtt_sample_us))
//! )
//! (when (> Micros 42000)
//! (report)
//! (reset)
//! )
//! ".to_owned(),
//! );
//! h
//! }
//! fn new_flow(&self, mut control: Datapath<I>, info: DatapathInfo) -> Self::Flow {
//! let sc = control.set_program("MyProgram", None).unwrap();
//! MyCongestionControlAlgorithm(sc)
//! }
//! }
//! impl Flow for MyCongestionControlAlgorithm {
//! fn on_report(&mut self, sock_id: u32, m: Report) {
//! println!("minrtt: {:?}", m.get_field("Report.minrtt", &self.0).unwrap());
//! }
//! }
//! ```
#![feature(box_patterns)]
#![feature(integer_atomics)]
#![feature(never_type)]
#![feature(stmt_expr_attributes)]
#![feature(test)]
extern crate bytes;
extern crate clap;
extern crate crossbeam;
extern crate fnv;
extern crate libc;
extern crate nix;
#[macro_use]
extern crate nom;
extern crate time;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::{atomic, Arc};
use std::thread;
pub mod ipc;
pub mod lang;
pub mod serialize;
pub mod test_helper;
#[macro_use]
pub mod algs;
mod errors;
pub use crate::errors::*;
use crate::ipc::Ipc;
use crate::ipc::{BackendBuilder, BackendSender};
use crate::lang::{Bin, Reg, Scope};
use crate::serialize::Msg;
/// CCP custom `Result` type, using `Error` as the `Err` type.
pub type Result<T> = std::result::Result<T, Error>;
/// A collection of methods to interact with the datapath.
pub trait DatapathTrait {
fn get_sock_id(&self) -> u32;
/// Tell datapath to use a preinstalled program.
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>, | fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>;
}
/// A collection of methods to interact with the datapath.
#[derive(Clone)]
pub struct Datapath<T: Ipc> {
sock_id: u32,
sender: BackendSender<T>,
programs: Rc<HashMap<String, Scope>>,
}
impl<T: Ipc> DatapathTrait for Datapath<T> {
fn get_sock_id(&self) -> u32 {
self.sock_id
}
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope> {
// if the program with this key exists, return it; otherwise return an error
match self.programs.get(program_name) {
Some(sc) => {
// apply optional updates to values of registers in this scope
let fields: Vec<(Reg, u64)> = fields
.unwrap_or_else(|| &[])
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::changeprog::Msg {
sid: self.sock_id,
program_uid: sc.program_uid,
num_fields: fields.len() as u32,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(sc.clone())
}
_ => Err(Error(format!(
"Map does not contain datapath program with key: {:?}",
program_name
))),
}
}
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> {
let fields: Vec<(Reg, u64)> = update
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::update_field::Msg {
sid: self.sock_id,
num_fields: fields.len() as u8,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(())
}
}
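// Usage sketch (illustrative): updating a register of the currently-installed program from
// algorithm code. Assumes `dp` is a `Datapath`, `sc` is the `Scope` returned by `set_program`,
// and the installed program exposes a settable `Cwnd` register.
//
//     dp.update_field(&sc, &[("Cwnd", 42 * 1460)])?;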
fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()>
where
I: Ipc,
{
let msg = serialize::install::Msg {
sid: sock_id,
program_uid: sc.program_uid,
num_events: bin.events.len() as u32,
num_instrs: bin.instrs.len() as u32,
instrs: bin,
};
let buf = serialize::serialize(&msg)?;
sender.send_msg(&buf[..])?;
Ok(())
}
/// Configuration parameters for the portus runtime.
/// Defines a `slog::Logger` to use for (optional) logging
#[derive(Clone, Default)]
pub struct Config {
pub logger: Option<slog::Logger>,
}
/// The set of information passed by the datapath to CCP
/// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination
/// IP and port), the initial congestion window (`init_cwnd`), and flow MSS.
#[derive(Debug, Clone)]
pub struct DatapathInfo {
pub sock_id: u32,
pub init_cwnd: u32,
pub mss: u32,
pub src_ip: u32,
pub src_port: u32,
pub dst_ip: u32,
pub dst_port: u32,
}
/// Contains the values of the pre-defined Report struct from the fold function.
/// Use `get_field` to query its values using the names defined in the fold function.
pub struct Report {
pub program_uid: u32,
fields: Vec<u64>,
}
impl Report {
/// Uses the `Scope` returned by `lang::compile` (or `install`) to query
/// the `Report` for its values.
pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> {
if sc.program_uid != self.program_uid {
return Err(Error::from(StaleProgramError));
}
match sc.get(field) {
Some(r) => match *r {
Reg::Report(idx, _, _) => {
if idx as usize >= self.fields.len() {
Err(Error::from(InvalidReportError))
} else {
Ok(self.fields[idx as usize])
}
}
_ => Err(Error::from(InvalidRegTypeError)),
},
None => Err(Error::from(FieldNotFoundError)),
}
}
}
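// Usage sketch (illustrative): querying a report value from `Flow::on_report`. Assumes `self.sc`
// is the `Scope` returned by `set_program` and the installed program defines `Report.minrtt`.
//
//     fn on_report(&mut self, _sock_id: u32, m: Report) {
//         if let Ok(minrtt) = m.get_field("Report.minrtt", &self.sc) {
//             println!("minrtt: {}", minrtt);
//         }
//     }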
/// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and
///[`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control
/// algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait Flow {
/// This callback specifies the algorithm's behavior when it receives a report
/// of measurements from the datapath.
fn on_report(&mut self, sock_id: u32, m: Report);
/// Optionally specify what the algorithm should do when the flow ends,
/// e.g., clean up any external resources.
/// The default implementation does nothing.
fn close(&mut self) {}
}
impl<T> Flow for Box<T>
where
T: Flow + ?Sized,
{
fn on_report(&mut self, sock_id: u32, m: Report) {
T::on_report(self, sock_id, m)
}
fn close(&mut self) {
T::close(self)
}
}
/// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and
/// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait CongAlg<I: Ipc> {
/// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage
/// an individual connection.
type Flow: Flow;
/// A unique name for the algorithm.
fn name() -> &'static str;
/// `datapath_programs` returns all datapath programs the congestion control algorithm
/// will use during its execution. It is called once, when Portus initializes
/// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)).
///
/// It should return a map of string pairs, where each key is a unique name
/// identifying the program, and the corresponding value is the code for the program itself.
///
/// The Portus runtime will panic if any of the datapath programs do not compile.
///
/// For example,
/// ```
/// extern crate fnv;
/// use std::collections::HashMap;
/// let mut h = HashMap::new();
/// h.insert("prog1", "...(program)...".to_string());
/// h.insert("prog2", "...(program)...".to_string());
/// ```
fn datapath_programs(&self) -> HashMap<&'static str, String>;
/// Create a new instance of the CongAlg to manage a new flow.
/// Optionally copy any configuration parameters from `&self`.
fn new_flow(&self, control: Datapath<I>, info: DatapathInfo) -> Self::Flow;
}
/// Structs implementing [`portus::CongAlg`](./trait.CongAlg.html) must also implement this trait
/// (and must be annotated with [`portus_export::register_ccp_alg`]())
///
/// The expected use of this trait in a calling program is as follows:
/// ```no-run
/// let app = CongAlgBuilder::args();
/// let matches = app.get_matches_from(std::env::args_os());
/// let alg = CongAlgBuilder::with_arg_matches(&matches, None);
/// ```
pub trait CongAlgBuilder<'a, 'b> {
/// This function should return a new
/// [`clap::App`](https://docs.rs/clap/2.32.0/clap/struct.App.html) that describes the
/// arguments this algorithm needs to create an instance of itself.
fn args() -> clap::App<'a, 'b>;
/// This function takes as input the set of parsed arguments and uses them to parameterize a
/// new instance of this congestion control algorithm. The matches will be derived from
/// running `clap::App::get_matches_from` on the `clap::App` returned by the `args` function.
/// It also takes an instance of a logger so that the calling program can define the logging
/// behavior (e.g. format and redirection).
fn with_arg_matches(args: &clap::ArgMatches, logger: Option<slog::Logger>) -> Result<Self>
where
Self: Sized;
}
/// A handle to manage running instances of the CCP execution loop.
#[derive(Debug)]
pub struct CCPHandle {
pub continue_listening: Arc<atomic::AtomicBool>,
pub join_handle: thread::JoinHandle<Result<()>>,
}
impl CCPHandle {
/// Instruct the execution loop to exit.
pub fn kill(&self) {
self.continue_listening
.store(false, atomic::Ordering::SeqCst);
}
// TODO: if join_handle.join() returns an Err instead of Ok because
// some function panicked, this function should return an error
// with the same string from the panic.
/// Collect the error from the thread running the CCP execution loop
/// once it exits.
pub fn wait(self) -> Result<()> {
match self.join_handle.join() {
Ok(r) => r,
Err(_) => Err(Error(String::from("Call to run_inner panicked"))),
}
}
}
/// Main execution loop of CCP for the static pipeline use case.
/// The `run` method blocks 'forever'; it only returns in two cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
///
/// Callers must construct a `BackendBuilder` and a `Config`.
/// Algorithm implementations should:
/// 1. Initialize an IPC `BackendBuilder` (depending on the datapath).
/// 2. Call `run()` or `spawn()`, passing the `BackendBuilder` and a `Config` with an optional
/// logger and command-line argument structure.
/// `run()` and `spawn()` create `Arc<AtomicBool>` objects, which are passed into `run_inner` to
/// build the backend, so `spawn()` can create a `CCPHandle` that references this boolean to
/// kill the thread.
pub fn run<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> Result<!>
where
I: Ipc,
U: CongAlg<I>,
{
// call run_inner
match run_inner(
Arc::new(atomic::AtomicBool::new(true)),
backend_builder,
cfg,
alg,
) {
Ok(_) => unreachable!(),
Err(e) => Err(e),
}
}
/// Spawn a thread which will perform the CCP execution loop. Returns
/// a `CCPHandle`, which the caller can use to cause the execution loop
/// to stop.
/// The spawned loop runs 'forever'; it only returns in three cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
/// 3. The caller calls `CCPHandle::kill()`
///
/// See [`run`](./fn.run.html) for more information.
pub fn spawn<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> CCPHandle
where
I: Ipc,
U: CongAlg<I> + 'static + Send,
{
let stop_signal = Arc::new(atomic::AtomicBool::new(true));
CCPHandle {
continue_listening: stop_signal.clone(),
join_handle: thread::spawn(move || run_inner(stop_signal, backend_builder, cfg, alg)),
}
}
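// Usage sketch (illustrative): running the execution loop on a background thread. Assumes
// `builder` is a `BackendBuilder` already constructed for the datapath's IPC mechanism and `alg`
// implements `CongAlg` for that IPC type.
//
//     let handle = spawn(builder, Config { logger: None }, alg);
//     // ... later, request shutdown and collect the result:
//     handle.kill();
//     handle.wait().unwrap();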
// Main execution inner loop of ccp.
// Blocks "forever", or until the iterator stops iterating.
//
// `run_inner()`:
// 1. listens for messages from the datapath
// 2. calls the appropriate method on `U: impl CongAlg`
// The function can return for two reasons: an error, or the iterator returned None.
// The latter should only happen for spawn(), and not for run().
// It returns any error, either from:
// 1. the IPC channel failing
// 2. Receiving an install control message (only the datapath should receive these).
fn run_inner<I, U>(
continue_listening: Arc<atomic::AtomicBool>,
backend_builder: BackendBuilder<I>,
cfg: Config,
alg: U,
) -> Result<()>
where
I: Ipc,
U: CongAlg<I>,
{
let mut receive_buf = [0u8; 1024];
let mut b = backend_builder.build(continue_listening.clone(), &mut receive_buf[..]);
let mut flows = HashMap::<u32, U::Flow>::default();
let backend = b.sender();
if let Some(log) = cfg.logger.as_ref() {
info!(log, "starting CCP";
"algorithm" => U::name(),
"ipc" => I::name(),
);
}
let mut scope_map = Rc::new(HashMap::<String, Scope>::default());
let programs = alg.datapath_programs();
for (program_name, program) in programs.iter() {
match lang::compile(program.as_bytes(), &[]) {
Ok((bin, sc)) => {
match send_and_install(0, &backend, bin, &sc) {
Ok(_) => {}
Err(e) => {
return Err(Error(format!(
"Failed to install datapath program \"{}\": {:?}",
program_name, e
)));
}
}
Rc::get_mut(&mut scope_map)
.unwrap()
.insert(program_name.to_string(), sc.clone());
}
Err(e) => {
return Err(Error(format!(
"Datapath program \"{}\" failed to compile: {:?}",
program_name, e
)));
}
}
}
while let Some(msg) = b.next() {
match msg {
Msg::Cr(c) => {
if flows.remove(&c.sid).is_some() {
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "re-creating already created flow"; "sid" => c.sid);
}
}
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "creating new flow";
"sid" => c.sid,
"init_cwnd" => c.init_cwnd,
"mss" => c.mss,
"src_ip" => c.src_ip,
"src_port" => c.src_port,
"dst_ip" => c.dst_ip,
"dst_port" => c.dst_port,
);
}
let f = alg.new_flow(
Datapath {
sock_id: c.sid,
sender: backend.clone(),
programs: scope_map.clone(),
},
DatapathInfo {
sock_id: c.sid,
init_cwnd: c.init_cwnd,
mss: c.mss,
src_ip: c.src_ip,
src_port: c.src_port,
dst_ip: c.dst_ip,
dst_port: c.dst_port,
},
);
flows.insert(c.sid, f);
}
Msg::Ms(m) => {
if flows.contains_key(&m.sid) {
if m.num_fields == 0 {
let mut alg = flows.remove(&m.sid).unwrap();
alg.close();
} else {
let alg = flows.get_mut(&m.sid).unwrap();
alg.on_report(
m.sid,
Report {
program_uid: m.program_uid,
fields: m.fields,
},
)
}
} else if let Some(log) = cfg.logger.as_ref() {
debug!(log, "measurement for unknown flow"; "sid" => m.sid);
}
}
Msg::Ins(_) => {
unreachable!()
//return Err(Error(String::from("The start() listener should never receive an install \
// message, since it is on the CCP side.")));
}
_ => continue,
}
}
// if the thread has been killed, return that as error
if !continue_listening.load(atomic::Ordering::SeqCst) {
Ok(())
} else {
Err(Error(String::from("The IPC channel has closed.")))
}
}
#[cfg(test)]
mod test; | ) -> Result<Scope>;
/// Update the value of a register in an already-installed fold function. | random_line_split |
lib.rs | //! Welcome to CCP.
//!
//! This crate, portus, implements a CCP. This includes:
//! 1. An interface definition for external types wishing to implement congestion control
//! algorithms (`CongAlg`).
//! 2. A [compiler](lang/index.html) for datapath programs.
//! 3. An IPC and serialization [layer](ipc/index.html) for communicating with libccp-compliant datapaths.
//!
//! The entry points into portus are [`run`](./fn.run.html) and [`spawn`](./fn.spawn.html), which start
//! the CCP algorithm runtime. There is also the convenience macro [`start`](./macro.start.html).
//!
//! The runtime listens for datapath messages and dispatches calls to
//! the appropriate congestion control methods.
//!
//! Example
//! =======
//!
//! The following congestion control algorithm sets the congestion window to `42`, and prints the
//! minimum RTT observed over 42 millisecond intervals.
//!
//! ```
//! extern crate fnv;
//! extern crate portus;
//! use std::collections::HashMap;
//! use portus::{CongAlg, Flow, Config, Datapath, DatapathInfo, DatapathTrait, Report};
//! use portus::ipc::Ipc;
//! use portus::lang::Scope;
//! use portus::lang::Bin;
//!
//! #[derive(Clone, Default)]
//! struct MyCongestionControlAlgorithm(Scope);
//!
//! impl<I: Ipc> CongAlg<I> for MyCongestionControlAlgorithm {
//! type Flow = Self;
//!
//! fn name() -> &'static str {
//! "My congestion control algorithm"
//! }
//! fn datapath_programs(&self) -> HashMap<&'static str, String> {
//! let mut h = HashMap::default();
//! h.insert(
//! "MyProgram", "
//! (def (Report
//! (volatile minrtt +infinity)
//! ))
//! (when true
//! (:= Report.minrtt (min Report.minrtt Flow.rtt_sample_us))
//! )
//! (when (> Micros 42000)
//! (report)
//! (reset)
//! )
//! ".to_owned(),
//! );
//! h
//! }
//! fn new_flow(&self, mut control: Datapath<I>, info: DatapathInfo) -> Self::Flow {
//! let sc = control.set_program("MyProgram", None).unwrap();
//! MyCongestionControlAlgorithm(sc)
//! }
//! }
//! impl Flow for MyCongestionControlAlgorithm {
//! fn on_report(&mut self, sock_id: u32, m: Report) {
//! println!("minrtt: {:?}", m.get_field("Report.minrtt", &self.0).unwrap());
//! }
//! }
//! ```
#![feature(box_patterns)]
#![feature(integer_atomics)]
#![feature(never_type)]
#![feature(stmt_expr_attributes)]
#![feature(test)]
extern crate bytes;
extern crate clap;
extern crate crossbeam;
extern crate fnv;
extern crate libc;
extern crate nix;
#[macro_use]
extern crate nom;
extern crate time;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::{atomic, Arc};
use std::thread;
pub mod ipc;
pub mod lang;
pub mod serialize;
pub mod test_helper;
#[macro_use]
pub mod algs;
mod errors;
pub use crate::errors::*;
use crate::ipc::Ipc;
use crate::ipc::{BackendBuilder, BackendSender};
use crate::lang::{Bin, Reg, Scope};
use crate::serialize::Msg;
/// CCP custom `Result` type, using `Error` as the `Err` type.
pub type Result<T> = std::result::Result<T, Error>;
/// A collection of methods to interact with the datapath.
pub trait DatapathTrait {
fn get_sock_id(&self) -> u32;
/// Tell datapath to use a preinstalled program.
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope>;
/// Update the value of a register in an already-installed fold function.
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>;
}
/// A collection of methods to interact with the datapath.
#[derive(Clone)]
pub struct Datapath<T: Ipc> {
sock_id: u32,
sender: BackendSender<T>,
programs: Rc<HashMap<String, Scope>>,
}
impl<T: Ipc> DatapathTrait for Datapath<T> {
fn get_sock_id(&self) -> u32 {
self.sock_id
}
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope> {
// if the program with this key exists, return it; otherwise return an error
match self.programs.get(program_name) {
Some(sc) => {
// apply optional updates to values of registers in this scope
let fields: Vec<(Reg, u64)> = fields
.unwrap_or_else(|| &[])
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::changeprog::Msg {
sid: self.sock_id,
program_uid: sc.program_uid,
num_fields: fields.len() as u32,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(sc.clone())
}
_ => Err(Error(format!(
"Map does not contain datapath program with key: {:?}",
program_name
))),
}
}
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> {
let fields: Vec<(Reg, u64)> = update
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::update_field::Msg {
sid: self.sock_id,
num_fields: fields.len() as u8,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(())
}
}
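// Usage sketch (illustrative): switching the datapath to a program installed at startup,
// optionally seeding register values. Assumes "MyProgram" is one of the keys returned by
// `datapath_programs` and that the program defines a settable `Cwnd` register.
//
//     let sc = dp.set_program("MyProgram", Some(&[("Cwnd", 10 * 1460)]))?;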
fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()>
where
I: Ipc,
{
let msg = serialize::install::Msg {
sid: sock_id,
program_uid: sc.program_uid,
num_events: bin.events.len() as u32,
num_instrs: bin.instrs.len() as u32,
instrs: bin,
};
let buf = serialize::serialize(&msg)?;
sender.send_msg(&buf[..])?;
Ok(())
}
/// Configuration parameters for the portus runtime.
/// Defines a `slog::Logger` to use for (optional) logging
#[derive(Clone, Default)]
pub struct Config {
pub logger: Option<slog::Logger>,
}
/// The set of information passed by the datapath to CCP
/// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination
/// IP and port), the initial congestion window (`init_cwnd`), and flow MSS.
#[derive(Debug, Clone)]
pub struct | {
pub sock_id: u32,
pub init_cwnd: u32,
pub mss: u32,
pub src_ip: u32,
pub src_port: u32,
pub dst_ip: u32,
pub dst_port: u32,
}
/// Contains the values of the pre-defined Report struct from the fold function.
/// Use `get_field` to query its values using the names defined in the fold function.
pub struct Report {
pub program_uid: u32,
fields: Vec<u64>,
}
impl Report {
/// Uses the `Scope` returned by `lang::compile` (or `install`) to query
/// the `Report` for its values.
pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> {
if sc.program_uid != self.program_uid {
return Err(Error::from(StaleProgramError));
}
match sc.get(field) {
Some(r) => match *r {
Reg::Report(idx, _, _) => {
if idx as usize >= self.fields.len() {
Err(Error::from(InvalidReportError))
} else {
Ok(self.fields[idx as usize])
}
}
_ => Err(Error::from(InvalidRegTypeError)),
},
None => Err(Error::from(FieldNotFoundError)),
}
}
}
/// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and
///[`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control
/// algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait Flow {
/// This callback specifies the algorithm's behavior when it receives a report
/// of measurements from the datapath.
fn on_report(&mut self, sock_id: u32, m: Report);
/// Optionally specify what the algorithm should do when the flow ends,
/// e.g., clean up any external resources.
/// The default implementation does nothing.
fn close(&mut self) {}
}
impl<T> Flow for Box<T>
where
T: Flow + ?Sized,
{
fn on_report(&mut self, sock_id: u32, m: Report) {
T::on_report(self, sock_id, m)
}
fn close(&mut self) {
T::close(self)
}
}
/// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and
/// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait CongAlg<I: Ipc> {
/// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage
/// an individual connection.
type Flow: Flow;
/// A unique name for the algorithm.
fn name() -> &'static str;
/// `datapath_programs` returns all datapath programs the congestion control algorithm
/// will use during its execution. It is called once, when Portus initializes
/// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)).
///
/// It should return a map of string pairs, where each key is a unique name
/// identifying the program, and the corresponding value is the code for the program itself.
///
/// The Portus runtime will panic if any of the datapath programs do not compile.
///
/// For example,
/// ```
/// extern crate fnv;
/// use std::collections::HashMap;
/// let mut h = HashMap::new();
/// h.insert("prog1", "...(program)...".to_string());
/// h.insert("prog2", "...(program)...".to_string());
/// ```
fn datapath_programs(&self) -> HashMap<&'static str, String>;
/// Create a new instance of the CongAlg to manage a new flow.
/// Optionally copy any configuration parameters from `&self`.
fn new_flow(&self, control: Datapath<I>, info: DatapathInfo) -> Self::Flow;
}
/// Structs implementing [`portus::CongAlg`](./trait.CongAlg.html) must also implement this trait
/// (and must be annotated with [`portus_export::register_ccp_alg`]())
///
/// The expected use of this trait in a calling program is as follows:
/// ```no-run
/// let app = CongAlgBuilder::args();
/// let matches = app.get_matches_from(std::env::args_os());
/// let alg = CongAlgBuilder::with_arg_matches(&matches, None);
/// ```
pub trait CongAlgBuilder<'a, 'b> {
/// This function should return a new
/// [`clap::App`](https://docs.rs/clap/2.32.0/clap/struct.App.html) that describes the
/// arguments this algorithm needs to create an instance of itself.
fn args() -> clap::App<'a, 'b>;
/// This function takes as input the set of parsed arguments and uses them to parameterize a
/// new instance of this congestion control algorithm. The matches will be derived from
/// running `clap::App::get_matches_from` on the `clap::App` returned by the `args` function.
/// It also takes an instance of a logger so that the calling program can define the logging
/// behavior (e.g. format and redirection).
fn with_arg_matches(args: &clap::ArgMatches, logger: Option<slog::Logger>) -> Result<Self>
where
Self: Sized;
}
/// A handle to manage running instances of the CCP execution loop.
#[derive(Debug)]
pub struct CCPHandle {
pub continue_listening: Arc<atomic::AtomicBool>,
pub join_handle: thread::JoinHandle<Result<()>>,
}
impl CCPHandle {
/// Instruct the execution loop to exit.
pub fn kill(&self) {
self.continue_listening
.store(false, atomic::Ordering::SeqCst);
}
// TODO: if join_handle.join() returns an Err instead of Ok because
// some function panicked, this function should return an error
// with the same string from the panic.
/// Collect the error from the thread running the CCP execution loop
/// once it exits.
pub fn wait(self) -> Result<()> {
match self.join_handle.join() {
Ok(r) => r,
Err(_) => Err(Error(String::from("Call to run_inner panicked"))),
}
}
}
/// Main execution loop of CCP for the static pipeline use case.
/// The `run` method blocks 'forever'; it only returns in two cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
///
/// Callers must construct a `BackendBuilder` and a `Config`.
/// Algorithm implementations should:
/// 1. Initialize an IPC `BackendBuilder` (depending on the datapath).
/// 2. Call `run()` or `spawn()`, passing the `BackendBuilder` and a `Config` with an optional
/// logger and command-line argument structure.
/// `run()` and `spawn()` create `Arc<AtomicBool>` objects, which are passed into `run_inner` to
/// build the backend, so `spawn()` can create a `CCPHandle` that references this boolean to
/// kill the thread.
pub fn run<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> Result<!>
where
I: Ipc,
U: CongAlg<I>,
{
// call run_inner
match run_inner(
Arc::new(atomic::AtomicBool::new(true)),
backend_builder,
cfg,
alg,
) {
Ok(_) => unreachable!(),
Err(e) => Err(e),
}
}
/// Spawn a thread which will perform the CCP execution loop. Returns
/// a `CCPHandle`, which the caller can use to cause the execution loop
/// to stop.
/// The spawned loop runs 'forever'; it only returns in three cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
/// 3. The caller calls `CCPHandle::kill()`
///
/// See [`run`](./fn.run.html) for more information.
pub fn spawn<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> CCPHandle
where
I: Ipc,
U: CongAlg<I> + 'static + Send,
{
let stop_signal = Arc::new(atomic::AtomicBool::new(true));
CCPHandle {
continue_listening: stop_signal.clone(),
join_handle: thread::spawn(move || run_inner(stop_signal, backend_builder, cfg, alg)),
}
}
// Main execution inner loop of ccp.
// Blocks "forever", or until the iterator stops iterating.
//
// `run_inner()`:
// 1. listens for messages from the datapath
// 2. calls the appropriate method on `U: impl CongAlg`
// The function can return for two reasons: an error, or the iterator returned None.
// The latter should only happen for spawn(), and not for run().
// It returns any error, either from:
// 1. the IPC channel failing
// 2. Receiving an install control message (only the datapath should receive these).
fn run_inner<I, U>(
continue_listening: Arc<atomic::AtomicBool>,
backend_builder: BackendBuilder<I>,
cfg: Config,
alg: U,
) -> Result<()>
where
I: Ipc,
U: CongAlg<I>,
{
let mut receive_buf = [0u8; 1024];
let mut b = backend_builder.build(continue_listening.clone(), &mut receive_buf[..]);
let mut flows = HashMap::<u32, U::Flow>::default();
let backend = b.sender();
if let Some(log) = cfg.logger.as_ref() {
info!(log, "starting CCP";
"algorithm" => U::name(),
"ipc" => I::name(),
);
}
let mut scope_map = Rc::new(HashMap::<String, Scope>::default());
let programs = alg.datapath_programs();
for (program_name, program) in programs.iter() {
match lang::compile(program.as_bytes(), &[]) {
Ok((bin, sc)) => {
match send_and_install(0, &backend, bin, &sc) {
Ok(_) => {}
Err(e) => {
return Err(Error(format!(
"Failed to install datapath program \"{}\": {:?}",
program_name, e
)));
}
}
Rc::get_mut(&mut scope_map)
.unwrap()
.insert(program_name.to_string(), sc.clone());
}
Err(e) => {
return Err(Error(format!(
"Datapath program \"{}\" failed to compile: {:?}",
program_name, e
)));
}
}
}
while let Some(msg) = b.next() {
match msg {
Msg::Cr(c) => {
if flows.remove(&c.sid).is_some() {
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "re-creating already created flow"; "sid" => c.sid);
}
}
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "creating new flow";
"sid" => c.sid,
"init_cwnd" => c.init_cwnd,
"mss" => c.mss,
"src_ip" => c.src_ip,
"src_port" => c.src_port,
"dst_ip" => c.dst_ip,
"dst_port" => c.dst_port,
);
}
let f = alg.new_flow(
Datapath {
sock_id: c.sid,
sender: backend.clone(),
programs: scope_map.clone(),
},
DatapathInfo {
sock_id: c.sid,
init_cwnd: c.init_cwnd,
mss: c.mss,
src_ip: c.src_ip,
src_port: c.src_port,
dst_ip: c.dst_ip,
dst_port: c.dst_port,
},
);
flows.insert(c.sid, f);
}
Msg::Ms(m) => {
if flows.contains_key(&m.sid) {
if m.num_fields == 0 {
let mut alg = flows.remove(&m.sid).unwrap();
alg.close();
} else {
let alg = flows.get_mut(&m.sid).unwrap();
alg.on_report(
m.sid,
Report {
program_uid: m.program_uid,
fields: m.fields,
},
)
}
} else if let Some(log) = cfg.logger.as_ref() {
debug!(log, "measurement for unknown flow"; "sid" => m.sid);
}
}
Msg::Ins(_) => {
unreachable!()
//return Err(Error(String::from("The start() listener should never receive an install \
// message, since it is on the CCP side.")));
}
_ => continue,
}
}
// if the thread has been killed, return that as error
if !continue_listening.load(atomic::Ordering::SeqCst) {
Ok(())
} else {
Err(Error(String::from("The IPC channel has closed.")))
}
}
#[cfg(test)]
mod test;
| DatapathInfo | identifier_name |
lib.rs | //! Welcome to CCP.
//!
//! This crate, portus, implements a CCP. This includes:
//! 1. An interface definition for external types wishing to implement congestion control
//! algorithms (`CongAlg`).
//! 2. A [compiler](lang/index.html) for datapath programs.
//! 3. An IPC and serialization [layer](ipc/index.html) for communicating with libccp-compliant datapaths.
//!
//! The entry points into portus are [`run`](./fn.run.html) and [`spawn`](./fn.spawn.html), which start
//! the CCP algorithm runtime. There is also the convenience macro [`start`](./macro.start.html).
//!
//! The runtime listens for datapath messages and dispatches calls to
//! the appropriate congestion control methods.
//!
//! Example
//! =======
//!
//! The following congestion control algorithm sets the congestion window to `42`, and prints the
//! minimum RTT observed over 42 millisecond intervals.
//!
//! ```
//! extern crate fnv;
//! extern crate portus;
//! use std::collections::HashMap;
//! use portus::{CongAlg, Flow, Config, Datapath, DatapathInfo, DatapathTrait, Report};
//! use portus::ipc::Ipc;
//! use portus::lang::Scope;
//! use portus::lang::Bin;
//!
//! #[derive(Clone, Default)]
//! struct MyCongestionControlAlgorithm(Scope);
//!
//! impl<I: Ipc> CongAlg<I> for MyCongestionControlAlgorithm {
//! type Flow = Self;
//!
//! fn name() -> &'static str {
//! "My congestion control algorithm"
//! }
//! fn datapath_programs(&self) -> HashMap<&'static str, String> {
//! let mut h = HashMap::default();
//! h.insert(
//! "MyProgram", "
//! (def (Report
//! (volatile minrtt +infinity)
//! ))
//! (when true
//! (:= Report.minrtt (min Report.minrtt Flow.rtt_sample_us))
//! )
//! (when (> Micros 42000)
//! (report)
//! (reset)
//! )
//! ".to_owned(),
//! );
//! h
//! }
//! fn new_flow(&self, mut control: Datapath<I>, info: DatapathInfo) -> Self::Flow {
//! let sc = control.set_program("MyProgram", None).unwrap();
//! MyCongestionControlAlgorithm(sc)
//! }
//! }
//! impl Flow for MyCongestionControlAlgorithm {
//! fn on_report(&mut self, sock_id: u32, m: Report) {
//! println!("minrtt: {:?}", m.get_field("Report.minrtt", &self.0).unwrap());
//! }
//! }
//! ```
#![feature(box_patterns)]
#![feature(integer_atomics)]
#![feature(never_type)]
#![feature(stmt_expr_attributes)]
#![feature(test)]
extern crate bytes;
extern crate clap;
extern crate crossbeam;
extern crate fnv;
extern crate libc;
extern crate nix;
#[macro_use]
extern crate nom;
extern crate time;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::{atomic, Arc};
use std::thread;
pub mod ipc;
pub mod lang;
pub mod serialize;
pub mod test_helper;
#[macro_use]
pub mod algs;
mod errors;
pub use crate::errors::*;
use crate::ipc::Ipc;
use crate::ipc::{BackendBuilder, BackendSender};
use crate::lang::{Bin, Reg, Scope};
use crate::serialize::Msg;
/// CCP custom `Result` type, using `Error` as the `Err` type.
pub type Result<T> = std::result::Result<T, Error>;
/// A collection of methods to interact with the datapath.
pub trait DatapathTrait {
fn get_sock_id(&self) -> u32;
/// Tell datapath to use a preinstalled program.
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope>;
/// Update the value of a register in an already-installed fold function.
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()>;
}
/// A collection of methods to interact with the datapath.
#[derive(Clone)]
pub struct Datapath<T: Ipc> {
sock_id: u32,
sender: BackendSender<T>,
programs: Rc<HashMap<String, Scope>>,
}
impl<T: Ipc> DatapathTrait for Datapath<T> {
fn get_sock_id(&self) -> u32 {
self.sock_id
}
fn set_program(
&mut self,
program_name: &'static str,
fields: Option<&[(&str, u32)]>,
) -> Result<Scope> {
// if a program with this key exists, send a change-program message and return its scope; otherwise return an error
match self.programs.get(program_name) {
Some(sc) => {
// apply optional updates to values of registers in this scope
let fields: Vec<(Reg, u64)> = fields
.unwrap_or_else(|| &[])
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::changeprog::Msg {
sid: self.sock_id,
program_uid: sc.program_uid,
num_fields: fields.len() as u32,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(sc.clone())
}
_ => Err(Error(format!(
"Map does not contain datapath program with key: {:?}",
program_name
))),
}
}
fn update_field(&self, sc: &Scope, update: &[(&str, u32)]) -> Result<()> {
let fields: Vec<(Reg, u64)> = update
.iter()
.map(|&(reg_name, new_value)| {
if reg_name.starts_with("__") {
return Err(Error(format!(
"Cannot update reserved field: {:?}",
reg_name
)));
}
sc.get(reg_name)
.ok_or_else(|| Error(format!("Unknown field: {:?}", reg_name)))
.and_then(|reg| match *reg {
Reg::Control(idx, ref t) => {
Ok((Reg::Control(idx, t.clone()), u64::from(new_value)))
}
Reg::Implicit(idx, ref t) if idx == 4 || idx == 5 => {
Ok((Reg::Implicit(idx, t.clone()), u64::from(new_value)))
}
_ => Err(Error(format!("Cannot update field: {:?}", reg_name))),
})
})
.collect::<Result<_>>()?;
let msg = serialize::update_field::Msg {
sid: self.sock_id,
num_fields: fields.len() as u8,
fields,
};
let buf = serialize::serialize(&msg)?;
self.sender.send_msg(&buf[..])?;
Ok(())
}
}
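// Example (sketch, not part of this crate): how a `Flow` implementation might call
// `update_field` from its report callback. The register name "Rate", the saved scope
// `self.sc`, and the handle `self.control` are hypothetical placeholders; valid names
// depend on the installed datapath program.
//
//     fn on_report(&mut self, _sock_id: u32, _m: Report) {
//         self.control.update_field(&self.sc, &[("Rate", 125_000u32)]).unwrap();
//     }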
fn send_and_install<I>(sock_id: u32, sender: &BackendSender<I>, bin: Bin, sc: &Scope) -> Result<()>
where
I: Ipc,
|
/// Configuration parameters for the portus runtime.
/// Defines a `slog::Logger` to use for (optional) logging
#[derive(Clone, Default)]
pub struct Config {
pub logger: Option<slog::Logger>,
}
/// The set of information passed by the datapath to CCP
/// when a connection starts. It includes a unique 5-tuple (CCP socket id + source and destination
/// IP and port), the initial congestion window (`init_cwnd`), and flow MSS.
#[derive(Debug, Clone)]
pub struct DatapathInfo {
pub sock_id: u32,
pub init_cwnd: u32,
pub mss: u32,
pub src_ip: u32,
pub src_port: u32,
pub dst_ip: u32,
pub dst_port: u32,
}
/// Contains the values of the pre-defined Report struct from the fold function.
/// Use `get_field` to query its values using the names defined in the fold function.
pub struct Report {
pub program_uid: u32,
fields: Vec<u64>,
}
impl Report {
/// Uses the `Scope` returned by `lang::compile` (or `install`) to query
/// the `Report` for its values.
pub fn get_field(&self, field: &str, sc: &Scope) -> Result<u64> {
if sc.program_uid != self.program_uid {
return Err(Error::from(StaleProgramError));
}
match sc.get(field) {
Some(r) => match *r {
Reg::Report(idx, _, _) => {
if idx as usize >= self.fields.len() {
Err(Error::from(InvalidReportError))
} else {
Ok(self.fields[idx as usize])
}
}
_ => Err(Error::from(InvalidRegTypeError)),
},
None => Err(Error::from(FieldNotFoundError)),
}
}
}
/// Implement this trait, [`portus::CongAlg`](./trait.CongAlg.html), and
///[`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) to define a CCP congestion control
/// algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait Flow {
/// This callback specifies the algorithm's behavior when it receives a report
/// of measurements from the datapath.
fn on_report(&mut self, sock_id: u32, m: Report);
/// Optionally specify what the algorithm should do when the flow ends,
/// e.g., clean up any external resources.
/// The default implementation does nothing.
fn close(&mut self) {}
}
impl<T> Flow for Box<T>
where
T: Flow + ?Sized,
{
fn on_report(&mut self, sock_id: u32, m: Report) {
T::on_report(self, sock_id, m)
}
fn close(&mut self) {
T::close(self)
}
}
/// Implement this trait, [`portus::CongAlgBuilder`](./trait.CongAlgBuilder.html) and
/// [`portus::Flow`](./trait.Flow.html) to define a CCP congestion control algorithm.
///
/// * `CongAlg` implements functionality which applies to a given algorithm as a whole
/// * `Flow` implements functionality specific to an individual flow
/// * `CongAlgBuilder` specifies how the trait that implements `CongAlg` should be built
/// from given command-line arguments.
pub trait CongAlg<I: Ipc> {
/// A type which implements the [`portus::Flow`](./trait.Flow.html) trait, to manage
/// an individual connection.
type Flow: Flow;
/// A unique name for the algorithm.
fn name() -> &'static str;
/// `datapath_programs` returns all datapath programs the congestion control algorithm
/// will use during its execution. It is called once, when Portus initializes
/// ([`portus::run`](./fn.run.html) or [`portus::spawn`](./fn.spawn.html)).
///
/// It should return a map, where each key is a unique name identifying the program and the
/// corresponding value is the code for the program itself.
///
/// The Portus runtime will panic if any of the datapath programs do not compile.
///
/// For example,
/// ```
/// extern crate fnv;
/// use std::collections::HashMap;
/// let mut h = HashMap::new();
/// h.insert("prog1", "...(program)...".to_string());
/// h.insert("prog2", "...(program)...".to_string());
/// ```
fn datapath_programs(&self) -> HashMap<&'static str, String>;
/// Create a new instance of the CongAlg to manage a new flow.
/// Optionally copy any configuration parameters from `&self`.
fn new_flow(&self, control: Datapath<I>, info: DatapathInfo) -> Self::Flow;
}
/// Structs implementing [`portus::CongAlg`](./trait.CongAlg.html) must also implement this trait
/// (and must be annotated with [`portus_export::register_ccp_alg`]())
///
/// The expected use of this trait in a calling program is as follows:
/// ```no-run
/// let args = CongAlgBuilder::args();
/// let matches = args.get_matches_from(std::env::args_os());
/// let alg = CongAlgBuilder::with_arg_matches(&matches, None);
/// ```
pub trait CongAlgBuilder<'a, 'b> {
/// This function should return a new
/// [`clap::App`](https://docs.rs/clap/2.32.0/clap/struct.App.html) that describes the
/// arguments this algorithm needs to create an instance of itself.
fn args() -> clap::App<'a, 'b>;
/// This function takes as input the set of parsed arguments and uses them to parameterize a
/// new instance of this congestion control algorithm. The matches will be derived from
/// running `clap::App::get_matches_from` on the `clap::App` returned by the `args` function.
/// It also takes an instance of a logger so that the calling program can define the logging
/// behavior (e.g., format and redirection).
fn with_arg_matches(args: &clap::ArgMatches, logger: Option<slog::Logger>) -> Result<Self>
where
Self: Sized;
}
/// A handle to manage running instances of the CCP execution loop.
#[derive(Debug)]
pub struct CCPHandle {
pub continue_listening: Arc<atomic::AtomicBool>,
pub join_handle: thread::JoinHandle<Result<()>>,
}
impl CCPHandle {
/// Instruct the execution loop to exit.
pub fn kill(&self) {
self.continue_listening
.store(false, atomic::Ordering::SeqCst);
}
// TODO: join_handle.join() returns an Err instead of Ok, because
// some function panicked, this function should return an error
// with the same string from the panic.
/// Collect the error from the thread running the CCP execution loop
/// once it exits.
pub fn wait(self) -> Result<()> {
match self.join_handle.join() {
Ok(r) => r,
Err(_) => Err(Error(String::from("Call to run_inner panicked"))),
}
}
}
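// Example (sketch, not part of this crate): driving the spawn()/CCPHandle lifecycle.
// `my_backend_builder()` and `MyAlg` are hypothetical placeholders for a concrete
// `BackendBuilder` and `CongAlg` implementation.
//
//     let handle = spawn(my_backend_builder(), Config { logger: None }, MyAlg::default());
//     // ... later, ask the execution loop to exit, then collect its result:
//     handle.kill();
//     handle.wait().expect("CCP execution loop exited with an error");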
/// Main execution loop of CCP for the static pipeline use case.
/// The `run` method blocks 'forever'; it only returns in two cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
///
/// Callers must construct a `BackendBuilder` and a `Config`.
/// Algorithm implementations should
/// 1. Initialize an IPC `BackendBuilder` (depending on the datapath).
/// 2. Call `run()` or `spawn()`, passing the `BackendBuilder` and a `Config` with an optional
///    logger and command-line argument structure.
///
/// `run()` and `spawn()` create an `Arc<AtomicBool>` which is passed into `run_inner` to build
/// the backend, so `spawn()` can create a `CCPHandle` that references this boolean to kill the
/// thread.
pub fn run<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> Result<!>
where
I: Ipc,
U: CongAlg<I>,
{
// call run_inner
match run_inner(
Arc::new(atomic::AtomicBool::new(true)),
backend_builder,
cfg,
alg,
) {
Ok(_) => unreachable!(),
Err(e) => Err(e),
}
}
/// Spawn a thread which will perform the CCP execution loop. Returns
/// a `CCPHandle`, which the caller can use to cause the execution loop
/// to stop.
/// The spawned loop runs 'forever'; it only returns in three cases:
/// 1. The IPC socket is closed.
/// 2. An invalid message is received.
/// 3. The caller calls `CCPHandle::kill()`
///
/// See [`run`](./fn.run.html) for more information.
pub fn spawn<I, U>(backend_builder: BackendBuilder<I>, cfg: Config, alg: U) -> CCPHandle
where
I: Ipc,
U: CongAlg<I> + 'static + Send,
{
let stop_signal = Arc::new(atomic::AtomicBool::new(true));
CCPHandle {
continue_listening: stop_signal.clone(),
join_handle: thread::spawn(move || run_inner(stop_signal, backend_builder, cfg, alg)),
}
}
// Main execution inner loop of CCP.
// Blocks "forever", or until the iterator stops iterating.
//
// `run_inner()`:
// 1. listens for messages from the datapath
// 2. calls the appropriate method on `U: impl CongAlg`
// The function can return for two reasons: an error, or the iterator returned None.
// The latter should only happen for spawn(), and not for run().
// It returns any error, either from:
// 1. the IPC channel failing
// 2. Receiving an install control message (only the datapath should receive these).
fn run_inner<I, U>(
continue_listening: Arc<atomic::AtomicBool>,
backend_builder: BackendBuilder<I>,
cfg: Config,
alg: U,
) -> Result<()>
where
I: Ipc,
U: CongAlg<I>,
{
let mut receive_buf = [0u8; 1024];
let mut b = backend_builder.build(continue_listening.clone(), &mut receive_buf[..]);
let mut flows = HashMap::<u32, U::Flow>::default();
let backend = b.sender();
if let Some(log) = cfg.logger.as_ref() {
info!(log, "starting CCP";
"algorithm" => U::name(),
"ipc" => I::name(),
);
}
let mut scope_map = Rc::new(HashMap::<String, Scope>::default());
let programs = alg.datapath_programs();
for (program_name, program) in programs.iter() {
match lang::compile(program.as_bytes(), &[]) {
Ok((bin, sc)) => {
match send_and_install(0, &backend, bin, &sc) {
Ok(_) => {}
Err(e) => {
return Err(Error(format!(
"Failed to install datapath program \"{}\": {:?}",
program_name, e
)));
}
}
Rc::get_mut(&mut scope_map)
.unwrap()
.insert(program_name.to_string(), sc.clone());
}
Err(e) => {
return Err(Error(format!(
"Datapath program \"{}\" failed to compile: {:?}",
program_name, e
)));
}
}
}
while let Some(msg) = b.next() {
match msg {
Msg::Cr(c) => {
if flows.remove(&c.sid).is_some() {
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "re-creating already created flow"; "sid" => c.sid);
}
}
if let Some(log) = cfg.logger.as_ref() {
debug!(log, "creating new flow";
"sid" => c.sid,
"init_cwnd" => c.init_cwnd,
"mss" => c.mss,
"src_ip" => c.src_ip,
"src_port" => c.src_port,
"dst_ip" => c.dst_ip,
"dst_port" => c.dst_port,
);
}
let f = alg.new_flow(
Datapath {
sock_id: c.sid,
sender: backend.clone(),
programs: scope_map.clone(),
},
DatapathInfo {
sock_id: c.sid,
init_cwnd: c.init_cwnd,
mss: c.mss,
src_ip: c.src_ip,
src_port: c.src_port,
dst_ip: c.dst_ip,
dst_port: c.dst_port,
},
);
flows.insert(c.sid, f);
}
Msg::Ms(m) => {
if flows.contains_key(&m.sid) {
if m.num_fields == 0 {
let mut alg = flows.remove(&m.sid).unwrap();
alg.close();
} else {
let alg = flows.get_mut(&m.sid).unwrap();
alg.on_report(
m.sid,
Report {
program_uid: m.program_uid,
fields: m.fields,
},
)
}
} else if let Some(log) = cfg.logger.as_ref() {
debug!(log, "measurement for unknown flow"; "sid" => m.sid);
}
}
Msg::Ins(_) => {
unreachable!()
//return Err(Error(String::from("The start() listener should never receive an install \
// message, since it is on the CCP side.")));
}
_ => continue,
}
}
// if the thread has been killed, return that as error
if !continue_listening.load(atomic::Ordering::SeqCst) {
Ok(())
} else {
Err(Error(String::from("The IPC channel has closed.")))
}
}
#[cfg(test)]
mod test;
| {
let msg = serialize::install::Msg {
sid: sock_id,
program_uid: sc.program_uid,
num_events: bin.events.len() as u32,
num_instrs: bin.instrs.len() as u32,
instrs: bin,
};
let buf = serialize::serialize(&msg)?;
sender.send_msg(&buf[..])?;
Ok(())
} | identifier_body |
models.py | """passbook core models"""
from datetime import timedelta
from random import SystemRandom
from time import sleep
from typing import Any, Optional
from uuid import uuid4
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.db import models
from django.http import HttpRequest
from django.urls import reverse_lazy
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django_prometheus.models import ExportModelOperationsMixin
from guardian.mixins import GuardianUserMixin
from jinja2 import Undefined
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.nativetypes import NativeEnvironment
from model_utils.managers import InheritanceManager
from structlog import get_logger
from passbook.core.exceptions import PropertyMappingExpressionException
from passbook.core.signals import password_changed
from passbook.core.types import UILoginButton, UIUserSettings
from passbook.lib.models import CreatedUpdatedModel, UUIDModel
from passbook.policies.exceptions import PolicyException
from passbook.policies.types import PolicyRequest, PolicyResult
LOGGER = get_logger()
NATIVE_ENVIRONMENT = NativeEnvironment()
def default_nonce_duration():
"""Default duration a Nonce is valid"""
return now() + timedelta(hours=4)
class Group(ExportModelOperationsMixin("group"), UUIDModel):
"""Custom Group model which supports a basic hierarchy"""
name = models.CharField(_("name"), max_length=80)
parent = models.ForeignKey(
"Group",
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="children",
)
attributes = JSONField(default=dict, blank=True)
def __str__(self):
return f"Group {self.name}"
class Meta:
unique_together = (("name", "parent",),)
class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser):
"""Custom User model to allow easier adding o f user-based settings"""
uuid = models.UUIDField(default=uuid4, editable=False)
name = models.TextField(help_text=_("User's display name."))
sources = models.ManyToManyField("Source", through="UserSourceConnection")
groups = models.ManyToManyField("Group")
password_change_date = models.DateTimeField(auto_now_add=True)
attributes = JSONField(default=dict, blank=True)
def set_password(self, password):
if self.pk:
password_changed.send(sender=self, user=self, password=password)
self.password_change_date = now()
return super().set_password(password)
class | :
permissions = (("reset_user_password", "Reset Password"),)
class Provider(ExportModelOperationsMixin("provider"), models.Model):
"""Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application"""
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
objects = InheritanceManager()
# This class defines no field for easier inheritance
def __str__(self):
if hasattr(self, "name"):
return getattr(self, "name")
return super().__str__()
class PolicyModel(UUIDModel, CreatedUpdatedModel):
"""Base model which can have policies applied to it"""
policies = models.ManyToManyField("Policy", blank=True)
class Factor(ExportModelOperationsMixin("factor"), PolicyModel):
"""Authentication factor, multiple instances of the same Factor can be used"""
name = models.TextField(help_text=_("Factor's display Name."))
slug = models.SlugField(
unique=True, help_text=_("Internal factor name, used in URLs.")
)
order = models.IntegerField()
enabled = models.BooleanField(default=True)
objects = InheritanceManager()
type = ""
form = ""
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return f"Factor {self.slug}"
class Application(ExportModelOperationsMixin("application"), PolicyModel):
"""Every Application which uses passbook for authentication/identification/authorization
needs an Application record. Other authentication types can subclass this Model to
add custom fields and other properties"""
name = models.TextField(help_text=_("Application's display Name."))
slug = models.SlugField(help_text=_("Internal application name, used in URLs."))
skip_authorization = models.BooleanField(default=False)
provider = models.OneToOneField(
"Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT
)
meta_launch_url = models.URLField(default="", blank=True)
meta_icon_url = models.TextField(default="", blank=True)
meta_description = models.TextField(default="", blank=True)
meta_publisher = models.TextField(default="", blank=True)
objects = InheritanceManager()
def get_provider(self) -> Optional[Provider]:
"""Get casted provider instance"""
if not self.provider:
return None
return Provider.objects.get_subclass(pk=self.provider.pk)
def __str__(self):
return self.name
class Source(ExportModelOperationsMixin("source"), PolicyModel):
"""Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server"""
name = models.TextField(help_text=_("Source's display Name."))
slug = models.SlugField(help_text=_("Internal source name, used in URLs."))
enabled = models.BooleanField(default=True)
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
form = "" # ModelForm-based class ued to create/edit instance
objects = InheritanceManager()
@property
def ui_login_button(self) -> Optional[UILoginButton]:
"""If source uses a http-based flow, return UI Information about the login
button. If source doesn't use http-based flow, return None."""
return None
@property
def ui_additional_info(self) -> Optional[str]:
"""Return additional Info, such as a callback URL. Show in the administration interface."""
return None
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return self.name
class UserSourceConnection(CreatedUpdatedModel):
"""Connection between User and Source."""
user = models.ForeignKey(User, on_delete=models.CASCADE)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
class Meta:
unique_together = (("user", "source"),)
class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel):
"""Policies which specify if a user is authorized to use an Application. Can be overridden by
other types to add other fields, more logic, etc."""
name = models.TextField(blank=True, null=True)
negate = models.BooleanField(default=False)
order = models.IntegerField(default=0)
timeout = models.IntegerField(default=30)
objects = InheritanceManager()
def __str__(self):
return f"Policy {self.name}"
def passes(self, request: PolicyRequest) -> PolicyResult:
"""Check if user instance passes this policy"""
raise PolicyException()
class DebugPolicy(Policy):
"""Policy used for debugging the PolicyEngine. Returns a fixed result,
but takes a random time to process."""
result = models.BooleanField(default=False)
wait_min = models.IntegerField(default=5)
wait_max = models.IntegerField(default=30)
form = "passbook.core.forms.policies.DebugPolicyForm"
def passes(self, request: PolicyRequest) -> PolicyResult:
"""Wait random time then return result"""
wait = SystemRandom().randrange(self.wait_min, self.wait_max)
LOGGER.debug("Policy waiting", policy=self, delay=wait)
sleep(wait)
return PolicyResult(self.result, "Debugging")
class Meta:
verbose_name = _("Debug Policy")
verbose_name_plural = _("Debug Policies")
class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel):
"""Single-use invitation link"""
created_by = models.ForeignKey("User", on_delete=models.CASCADE)
expires = models.DateTimeField(default=None, blank=True, null=True)
fixed_username = models.TextField(blank=True, default=None)
fixed_email = models.TextField(blank=True, default=None)
needs_confirmation = models.BooleanField(default=True)
@property
def link(self):
"""Get link to use invitation"""
return (
reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}"
)
def __str__(self):
return f"Invitation {self.uuid.hex} created by {self.created_by}"
class Meta:
verbose_name = _("Invitation")
verbose_name_plural = _("Invitations")
class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel):
"""One-time link for password resets/sign-up-confirmations"""
expires = models.DateTimeField(default=default_nonce_duration)
user = models.ForeignKey("User", on_delete=models.CASCADE)
expiring = models.BooleanField(default=True)
description = models.TextField(default="", blank=True)
@property
def is_expired(self) -> bool:
"""Check if nonce is expired yet."""
return now() > self.expires
def __str__(self):
return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})"
class Meta:
verbose_name = _("Nonce")
verbose_name_plural = _("Nonces")
class PropertyMapping(UUIDModel):
"""User-defined key -> x mapping which can be used by providers to expose extra data."""
name = models.TextField()
expression = models.TextField()
form = ""
objects = InheritanceManager()
def evaluate(
self, user: Optional[User], request: Optional[HttpRequest], **kwargs
) -> Any:
"""Evaluate `self.expression` using `**kwargs` as Context."""
try:
expression = NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise PropertyMappingExpressionException from exc
try:
response = expression.render(user=user, request=request, **kwargs)
if isinstance(response, Undefined):
raise PropertyMappingExpressionException("Response was 'Undefined'")
return response
except UndefinedError as exc:
raise PropertyMappingExpressionException from exc
def save(self, *args, **kwargs):
try:
NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise ValidationError("Expression Syntax Error") from exc
return super().save(*args, **kwargs)
def __str__(self):
return f"Property Mapping {self.name}"
class Meta:
verbose_name = _("Property Mapping")
verbose_name_plural = _("Property Mappings")
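# Example (sketch, not part of this module): how a provider might evaluate a
# PropertyMapping. The expression and arguments below are illustrative only.
#
#     mapping = PropertyMapping(name="email", expression="{{ user.email }}")
#     value = mapping.evaluate(user=some_user, request=None)
#     # `value` is whatever the Jinja2 NativeEnvironment renders; a
#     # PropertyMappingExpressionException is raised on syntax or undefined errors.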
| Meta | identifier_name |
models.py | """passbook core models"""
from datetime import timedelta
from random import SystemRandom
from time import sleep
from typing import Any, Optional
from uuid import uuid4
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.db import models
from django.http import HttpRequest
from django.urls import reverse_lazy
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django_prometheus.models import ExportModelOperationsMixin
from guardian.mixins import GuardianUserMixin
from jinja2 import Undefined
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.nativetypes import NativeEnvironment
from model_utils.managers import InheritanceManager
from structlog import get_logger
from passbook.core.exceptions import PropertyMappingExpressionException
from passbook.core.signals import password_changed
from passbook.core.types import UILoginButton, UIUserSettings
from passbook.lib.models import CreatedUpdatedModel, UUIDModel
from passbook.policies.exceptions import PolicyException
from passbook.policies.types import PolicyRequest, PolicyResult
LOGGER = get_logger()
NATIVE_ENVIRONMENT = NativeEnvironment()
def default_nonce_duration():
"""Default duration a Nonce is valid"""
return now() + timedelta(hours=4)
class Group(ExportModelOperationsMixin("group"), UUIDModel):
"""Custom Group model which supports a basic hierarchy"""
name = models.CharField(_("name"), max_length=80)
parent = models.ForeignKey(
"Group",
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="children",
)
attributes = JSONField(default=dict, blank=True)
def __str__(self):
return f"Group {self.name}"
class Meta:
unique_together = (("name", "parent",),)
class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser):
"""Custom User model to allow easier adding o f user-based settings"""
uuid = models.UUIDField(default=uuid4, editable=False)
name = models.TextField(help_text=_("User's display name."))
sources = models.ManyToManyField("Source", through="UserSourceConnection")
groups = models.ManyToManyField("Group")
password_change_date = models.DateTimeField(auto_now_add=True)
attributes = JSONField(default=dict, blank=True)
def set_password(self, password):
if self.pk:
password_changed.send(sender=self, user=self, password=password)
self.password_change_date = now()
return super().set_password(password)
class Meta:
permissions = (("reset_user_password", "Reset Password"),)
class Provider(ExportModelOperationsMixin("provider"), models.Model):
"""Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application"""
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
objects = InheritanceManager()
# This class defines no field for easier inheritance
def __str__(self):
if hasattr(self, "name"):
return getattr(self, "name")
return super().__str__()
class PolicyModel(UUIDModel, CreatedUpdatedModel):
"""Base model which can have policies applied to it"""
policies = models.ManyToManyField("Policy", blank=True)
class Factor(ExportModelOperationsMixin("factor"), PolicyModel):
"""Authentication factor, multiple instances of the same Factor can be used"""
name = models.TextField(help_text=_("Factor's display Name."))
slug = models.SlugField(
unique=True, help_text=_("Internal factor name, used in URLs.")
)
order = models.IntegerField()
enabled = models.BooleanField(default=True)
objects = InheritanceManager()
type = ""
form = ""
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return f"Factor {self.slug}"
class Application(ExportModelOperationsMixin("application"), PolicyModel):
"""Every Application which uses passbook for authentication/identification/authorization
needs an Application record. Other authentication types can subclass this Model to
add custom fields and other properties"""
name = models.TextField(help_text=_("Application's display Name."))
slug = models.SlugField(help_text=_("Internal application name, used in URLs."))
skip_authorization = models.BooleanField(default=False)
provider = models.OneToOneField(
"Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT
)
meta_launch_url = models.URLField(default="", blank=True)
meta_icon_url = models.TextField(default="", blank=True)
meta_description = models.TextField(default="", blank=True)
meta_publisher = models.TextField(default="", blank=True)
objects = InheritanceManager()
def get_provider(self) -> Optional[Provider]:
"""Get casted provider instance"""
if not self.provider:
return None
return Provider.objects.get_subclass(pk=self.provider.pk)
def __str__(self):
return self.name
class Source(ExportModelOperationsMixin("source"), PolicyModel):
"""Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server"""
name = models.TextField(help_text=_("Source's display Name."))
slug = models.SlugField(help_text=_("Internal source name, used in URLs."))
enabled = models.BooleanField(default=True)
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
form = "" # ModelForm-based class ued to create/edit instance
objects = InheritanceManager()
@property
def ui_login_button(self) -> Optional[UILoginButton]:
"""If source uses a http-based flow, return UI Information about the login
button. If source doesn't use http-based flow, return None."""
return None
@property
def ui_additional_info(self) -> Optional[str]:
"""Return additional Info, such as a callback URL. Show in the administration interface."""
return None
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return self.name
class UserSourceConnection(CreatedUpdatedModel):
"""Connection between User and Source."""
user = models.ForeignKey(User, on_delete=models.CASCADE)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
class Meta:
unique_together = (("user", "source"),)
class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel):
"""Policies which specify if a user is authorized to use an Application. Can be overridden by
other types to add other fields, more logic, etc."""
name = models.TextField(blank=True, null=True)
negate = models.BooleanField(default=False)
order = models.IntegerField(default=0)
timeout = models.IntegerField(default=30)
objects = InheritanceManager()
def __str__(self):
return f"Policy {self.name}"
def passes(self, request: PolicyRequest) -> PolicyResult:
|
class DebugPolicy(Policy):
"""Policy used for debugging the PolicyEngine. Returns a fixed result,
but takes a random time to process."""
result = models.BooleanField(default=False)
wait_min = models.IntegerField(default=5)
wait_max = models.IntegerField(default=30)
form = "passbook.core.forms.policies.DebugPolicyForm"
def passes(self, request: PolicyRequest) -> PolicyResult:
"""Wait random time then return result"""
wait = SystemRandom().randrange(self.wait_min, self.wait_max)
LOGGER.debug("Policy waiting", policy=self, delay=wait)
sleep(wait)
return PolicyResult(self.result, "Debugging")
class Meta:
verbose_name = _("Debug Policy")
verbose_name_plural = _("Debug Policies")
class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel):
"""Single-use invitation link"""
created_by = models.ForeignKey("User", on_delete=models.CASCADE)
expires = models.DateTimeField(default=None, blank=True, null=True)
fixed_username = models.TextField(blank=True, default=None)
fixed_email = models.TextField(blank=True, default=None)
needs_confirmation = models.BooleanField(default=True)
@property
def link(self):
"""Get link to use invitation"""
return (
reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}"
)
def __str__(self):
return f"Invitation {self.uuid.hex} created by {self.created_by}"
class Meta:
verbose_name = _("Invitation")
verbose_name_plural = _("Invitations")
class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel):
"""One-time link for password resets/sign-up-confirmations"""
expires = models.DateTimeField(default=default_nonce_duration)
user = models.ForeignKey("User", on_delete=models.CASCADE)
expiring = models.BooleanField(default=True)
description = models.TextField(default="", blank=True)
@property
def is_expired(self) -> bool:
"""Check if nonce is expired yet."""
return now() > self.expires
def __str__(self):
return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})"
class Meta:
verbose_name = _("Nonce")
verbose_name_plural = _("Nonces")
class PropertyMapping(UUIDModel):
"""User-defined key -> x mapping which can be used by providers to expose extra data."""
name = models.TextField()
expression = models.TextField()
form = ""
objects = InheritanceManager()
def evaluate(
self, user: Optional[User], request: Optional[HttpRequest], **kwargs
) -> Any:
"""Evaluate `self.expression` using `**kwargs` as Context."""
try:
expression = NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise PropertyMappingExpressionException from exc
try:
response = expression.render(user=user, request=request, **kwargs)
if isinstance(response, Undefined):
raise PropertyMappingExpressionException("Response was 'Undefined'")
return response
except UndefinedError as exc:
raise PropertyMappingExpressionException from exc
def save(self, *args, **kwargs):
try:
NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise ValidationError("Expression Syntax Error") from exc
return super().save(*args, **kwargs)
def __str__(self):
return f"Property Mapping {self.name}"
class Meta:
verbose_name = _("Property Mapping")
verbose_name_plural = _("Property Mappings")
| """Check if user instance passes this policy"""
raise PolicyException() | identifier_body |
models.py | """passbook core models"""
from datetime import timedelta
from random import SystemRandom
from time import sleep
from typing import Any, Optional
from uuid import uuid4
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.db import models
from django.http import HttpRequest
from django.urls import reverse_lazy
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django_prometheus.models import ExportModelOperationsMixin
from guardian.mixins import GuardianUserMixin
from jinja2 import Undefined
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.nativetypes import NativeEnvironment
from model_utils.managers import InheritanceManager
from structlog import get_logger
from passbook.core.exceptions import PropertyMappingExpressionException
from passbook.core.signals import password_changed
from passbook.core.types import UILoginButton, UIUserSettings
from passbook.lib.models import CreatedUpdatedModel, UUIDModel
from passbook.policies.exceptions import PolicyException
from passbook.policies.types import PolicyRequest, PolicyResult
LOGGER = get_logger()
NATIVE_ENVIRONMENT = NativeEnvironment()
def default_nonce_duration():
"""Default duration a Nonce is valid"""
return now() + timedelta(hours=4)
class Group(ExportModelOperationsMixin("group"), UUIDModel):
"""Custom Group model which supports a basic hierarchy"""
name = models.CharField(_("name"), max_length=80)
parent = models.ForeignKey(
"Group",
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="children",
)
attributes = JSONField(default=dict, blank=True)
def __str__(self):
return f"Group {self.name}"
class Meta:
unique_together = (("name", "parent",),)
class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser):
"""Custom User model to allow easier adding o f user-based settings"""
uuid = models.UUIDField(default=uuid4, editable=False)
name = models.TextField(help_text=_("User's display name."))
sources = models.ManyToManyField("Source", through="UserSourceConnection")
groups = models.ManyToManyField("Group")
password_change_date = models.DateTimeField(auto_now_add=True)
attributes = JSONField(default=dict, blank=True)
def set_password(self, password):
if self.pk:
password_changed.send(sender=self, user=self, password=password)
self.password_change_date = now()
return super().set_password(password)
class Meta:
permissions = (("reset_user_password", "Reset Password"),)
class Provider(ExportModelOperationsMixin("provider"), models.Model):
"""Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application"""
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
objects = InheritanceManager()
# This class defines no field for easier inheritance
def __str__(self):
if hasattr(self, "name"):
return getattr(self, "name")
return super().__str__()
class PolicyModel(UUIDModel, CreatedUpdatedModel):
"""Base model which can have policies applied to it"""
policies = models.ManyToManyField("Policy", blank=True)
class Factor(ExportModelOperationsMixin("factor"), PolicyModel):
"""Authentication factor, multiple instances of the same Factor can be used"""
name = models.TextField(help_text=_("Factor's display Name."))
slug = models.SlugField(
unique=True, help_text=_("Internal factor name, used in URLs.")
)
order = models.IntegerField()
enabled = models.BooleanField(default=True)
objects = InheritanceManager()
type = ""
form = ""
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return f"Factor {self.slug}"
class Application(ExportModelOperationsMixin("application"), PolicyModel):
"""Every Application which uses passbook for authentication/identification/authorization
needs an Application record. Other authentication types can subclass this Model to
add custom fields and other properties"""
name = models.TextField(help_text=_("Application's display Name."))
slug = models.SlugField(help_text=_("Internal application name, used in URLs."))
skip_authorization = models.BooleanField(default=False)
provider = models.OneToOneField(
"Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT
)
meta_launch_url = models.URLField(default="", blank=True)
meta_icon_url = models.TextField(default="", blank=True)
meta_description = models.TextField(default="", blank=True)
meta_publisher = models.TextField(default="", blank=True)
objects = InheritanceManager()
def get_provider(self) -> Optional[Provider]:
"""Get casted provider instance"""
if not self.provider:
return None
return Provider.objects.get_subclass(pk=self.provider.pk)
def __str__(self):
return self.name
class Source(ExportModelOperationsMixin("source"), PolicyModel):
"""Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server"""
name = models.TextField(help_text=_("Source's display Name."))
slug = models.SlugField(help_text=_("Internal source name, used in URLs."))
enabled = models.BooleanField(default=True)
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
form = "" # ModelForm-based class ued to create/edit instance
objects = InheritanceManager()
@property
def ui_login_button(self) -> Optional[UILoginButton]:
"""If source uses a http-based flow, return UI Information about the login
button. If source doesn't use http-based flow, return None."""
return None
@property
def ui_additional_info(self) -> Optional[str]:
"""Return additional Info, such as a callback URL. Show in the administration interface."""
return None
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return self.name
class UserSourceConnection(CreatedUpdatedModel):
"""Connection between User and Source."""
user = models.ForeignKey(User, on_delete=models.CASCADE)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
class Meta:
unique_together = (("user", "source"),)
class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel):
"""Policies which specify if a user is authorized to use an Application. Can be overridden by
other types to add other fields, more logic, etc."""
name = models.TextField(blank=True, null=True)
negate = models.BooleanField(default=False)
order = models.IntegerField(default=0)
timeout = models.IntegerField(default=30)
objects = InheritanceManager()
def __str__(self):
return f"Policy {self.name}"
def passes(self, request: PolicyRequest) -> PolicyResult:
"""Check if user instance passes this policy""" | """Policy used for debugging the PolicyEngine. Returns a fixed result,
but takes a random time to process."""
result = models.BooleanField(default=False)
wait_min = models.IntegerField(default=5)
wait_max = models.IntegerField(default=30)
form = "passbook.core.forms.policies.DebugPolicyForm"
def passes(self, request: PolicyRequest) -> PolicyResult:
"""Wait random time then return result"""
wait = SystemRandom().randrange(self.wait_min, self.wait_max)
LOGGER.debug("Policy waiting", policy=self, delay=wait)
sleep(wait)
return PolicyResult(self.result, "Debugging")
class Meta:
verbose_name = _("Debug Policy")
verbose_name_plural = _("Debug Policies")
class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel):
"""Single-use invitation link"""
created_by = models.ForeignKey("User", on_delete=models.CASCADE)
expires = models.DateTimeField(default=None, blank=True, null=True)
fixed_username = models.TextField(blank=True, default=None)
fixed_email = models.TextField(blank=True, default=None)
needs_confirmation = models.BooleanField(default=True)
@property
def link(self):
"""Get link to use invitation"""
return (
reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}"
)
def __str__(self):
return f"Invitation {self.uuid.hex} created by {self.created_by}"
class Meta:
verbose_name = _("Invitation")
verbose_name_plural = _("Invitations")
class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel):
"""One-time link for password resets/sign-up-confirmations"""
expires = models.DateTimeField(default=default_nonce_duration)
user = models.ForeignKey("User", on_delete=models.CASCADE)
expiring = models.BooleanField(default=True)
description = models.TextField(default="", blank=True)
@property
def is_expired(self) -> bool:
"""Check if nonce is expired yet."""
return now() > self.expires
def __str__(self):
return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})"
class Meta:
verbose_name = _("Nonce")
verbose_name_plural = _("Nonces")
class PropertyMapping(UUIDModel):
"""User-defined key -> x mapping which can be used by providers to expose extra data."""
name = models.TextField()
expression = models.TextField()
form = ""
objects = InheritanceManager()
def evaluate(
self, user: Optional[User], request: Optional[HttpRequest], **kwargs
) -> Any:
"""Evaluate `self.expression` using `**kwargs` as Context."""
try:
expression = NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise PropertyMappingExpressionException from exc
try:
response = expression.render(user=user, request=request, **kwargs)
if isinstance(response, Undefined):
raise PropertyMappingExpressionException("Response was 'Undefined'")
return response
except UndefinedError as exc:
raise PropertyMappingExpressionException from exc
def save(self, *args, **kwargs):
try:
NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise ValidationError("Expression Syntax Error") from exc
return super().save(*args, **kwargs)
def __str__(self):
return f"Property Mapping {self.name}"
class Meta:
verbose_name = _("Property Mapping")
verbose_name_plural = _("Property Mappings") | raise PolicyException()
class DebugPolicy(Policy): | random_line_split |
models.py | """passbook core models"""
from datetime import timedelta
from random import SystemRandom
from time import sleep
from typing import Any, Optional
from uuid import uuid4
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.db import models
from django.http import HttpRequest
from django.urls import reverse_lazy
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django_prometheus.models import ExportModelOperationsMixin
from guardian.mixins import GuardianUserMixin
from jinja2 import Undefined
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.nativetypes import NativeEnvironment
from model_utils.managers import InheritanceManager
from structlog import get_logger
from passbook.core.exceptions import PropertyMappingExpressionException
from passbook.core.signals import password_changed
from passbook.core.types import UILoginButton, UIUserSettings
from passbook.lib.models import CreatedUpdatedModel, UUIDModel
from passbook.policies.exceptions import PolicyException
from passbook.policies.types import PolicyRequest, PolicyResult
LOGGER = get_logger()
NATIVE_ENVIRONMENT = NativeEnvironment()
def default_nonce_duration():
"""Default duration a Nonce is valid"""
return now() + timedelta(hours=4)
class Group(ExportModelOperationsMixin("group"), UUIDModel):
"""Custom Group model which supports a basic hierarchy"""
name = models.CharField(_("name"), max_length=80)
parent = models.ForeignKey(
"Group",
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="children",
)
attributes = JSONField(default=dict, blank=True)
def __str__(self):
return f"Group {self.name}"
class Meta:
unique_together = (("name", "parent",),)
class User(ExportModelOperationsMixin("user"), GuardianUserMixin, AbstractUser):
"""Custom User model to allow easier adding o f user-based settings"""
uuid = models.UUIDField(default=uuid4, editable=False)
name = models.TextField(help_text=_("User's display name."))
sources = models.ManyToManyField("Source", through="UserSourceConnection")
groups = models.ManyToManyField("Group")
password_change_date = models.DateTimeField(auto_now_add=True)
attributes = JSONField(default=dict, blank=True)
def set_password(self, password):
if self.pk:
password_changed.send(sender=self, user=self, password=password)
self.password_change_date = now()
return super().set_password(password)
class Meta:
permissions = (("reset_user_password", "Reset Password"),)
class Provider(ExportModelOperationsMixin("provider"), models.Model):
"""Application-independent Provider instance. For example SAML2 Remote, OAuth2 Application"""
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
objects = InheritanceManager()
# This class defines no field for easier inheritance
def __str__(self):
if hasattr(self, "name"):
|
return super().__str__()
class PolicyModel(UUIDModel, CreatedUpdatedModel):
"""Base model which can have policies applied to it"""
policies = models.ManyToManyField("Policy", blank=True)
class Factor(ExportModelOperationsMixin("factor"), PolicyModel):
"""Authentication factor, multiple instances of the same Factor can be used"""
name = models.TextField(help_text=_("Factor's display Name."))
slug = models.SlugField(
unique=True, help_text=_("Internal factor name, used in URLs.")
)
order = models.IntegerField()
enabled = models.BooleanField(default=True)
objects = InheritanceManager()
type = ""
form = ""
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return f"Factor {self.slug}"
class Application(ExportModelOperationsMixin("application"), PolicyModel):
"""Every Application which uses passbook for authentication/identification/authorization
needs an Application record. Other authentication types can subclass this Model to
add custom fields and other properties"""
name = models.TextField(help_text=_("Application's display Name."))
slug = models.SlugField(help_text=_("Internal application name, used in URLs."))
skip_authorization = models.BooleanField(default=False)
provider = models.OneToOneField(
"Provider", null=True, blank=True, default=None, on_delete=models.SET_DEFAULT
)
meta_launch_url = models.URLField(default="", blank=True)
meta_icon_url = models.TextField(default="", blank=True)
meta_description = models.TextField(default="", blank=True)
meta_publisher = models.TextField(default="", blank=True)
objects = InheritanceManager()
def get_provider(self) -> Optional[Provider]:
"""Get casted provider instance"""
if not self.provider:
return None
return Provider.objects.get_subclass(pk=self.provider.pk)
def __str__(self):
return self.name
class Source(ExportModelOperationsMixin("source"), PolicyModel):
"""Base Authentication source, i.e. an OAuth Provider, SAML Remote or LDAP Server"""
name = models.TextField(help_text=_("Source's display Name."))
slug = models.SlugField(help_text=_("Internal source name, used in URLs."))
enabled = models.BooleanField(default=True)
property_mappings = models.ManyToManyField(
"PropertyMapping", default=None, blank=True
)
form = "" # ModelForm-based class ued to create/edit instance
objects = InheritanceManager()
@property
def ui_login_button(self) -> Optional[UILoginButton]:
"""If source uses a http-based flow, return UI Information about the login
button. If source doesn't use http-based flow, return None."""
return None
@property
def ui_additional_info(self) -> Optional[str]:
"""Return additional Info, such as a callback URL. Show in the administration interface."""
return None
@property
def ui_user_settings(self) -> Optional[UIUserSettings]:
"""Entrypoint to integrate with User settings. Can either return None if no
user settings are available, or an instance of UIUserSettings."""
return None
def __str__(self):
return self.name
class UserSourceConnection(CreatedUpdatedModel):
"""Connection between User and Source."""
user = models.ForeignKey(User, on_delete=models.CASCADE)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
class Meta:
unique_together = (("user", "source"),)
class Policy(ExportModelOperationsMixin("policy"), UUIDModel, CreatedUpdatedModel):
"""Policies which specify if a user is authorized to use an Application. Can be overridden by
other types to add other fields, more logic, etc."""
name = models.TextField(blank=True, null=True)
negate = models.BooleanField(default=False)
order = models.IntegerField(default=0)
timeout = models.IntegerField(default=30)
objects = InheritanceManager()
def __str__(self):
return f"Policy {self.name}"
def passes(self, request: PolicyRequest) -> PolicyResult:
"""Check if user instance passes this policy"""
raise PolicyException()
class DebugPolicy(Policy):
"""Policy used for debugging the PolicyEngine. Returns a fixed result,
but takes a random time to process."""
result = models.BooleanField(default=False)
wait_min = models.IntegerField(default=5)
wait_max = models.IntegerField(default=30)
form = "passbook.core.forms.policies.DebugPolicyForm"
def passes(self, request: PolicyRequest) -> PolicyResult:
"""Wait random time then return result"""
wait = SystemRandom().randrange(self.wait_min, self.wait_max)
LOGGER.debug("Policy waiting", policy=self, delay=wait)
sleep(wait)
return PolicyResult(self.result, "Debugging")
class Meta:
verbose_name = _("Debug Policy")
verbose_name_plural = _("Debug Policies")
class Invitation(ExportModelOperationsMixin("invitation"), UUIDModel):
"""Single-use invitation link"""
created_by = models.ForeignKey("User", on_delete=models.CASCADE)
expires = models.DateTimeField(default=None, blank=True, null=True)
fixed_username = models.TextField(blank=True, default=None)
fixed_email = models.TextField(blank=True, default=None)
needs_confirmation = models.BooleanField(default=True)
@property
def link(self):
"""Get link to use invitation"""
return (
reverse_lazy("passbook_core:auth-sign-up") + f"?invitation={self.uuid.hex}"
)
def __str__(self):
return f"Invitation {self.uuid.hex} created by {self.created_by}"
class Meta:
verbose_name = _("Invitation")
verbose_name_plural = _("Invitations")
class Nonce(ExportModelOperationsMixin("nonce"), UUIDModel):
"""One-time link for password resets/sign-up-confirmations"""
expires = models.DateTimeField(default=default_nonce_duration)
user = models.ForeignKey("User", on_delete=models.CASCADE)
expiring = models.BooleanField(default=True)
description = models.TextField(default="", blank=True)
@property
def is_expired(self) -> bool:
"""Check if nonce is expired yet."""
return now() > self.expires
def __str__(self):
return f"Nonce f{self.uuid.hex} {self.description} (expires={self.expires})"
class Meta:
verbose_name = _("Nonce")
verbose_name_plural = _("Nonces")
class PropertyMapping(UUIDModel):
"""User-defined key -> x mapping which can be used by providers to expose extra data."""
name = models.TextField()
expression = models.TextField()
form = ""
objects = InheritanceManager()
def evaluate(
self, user: Optional[User], request: Optional[HttpRequest], **kwargs
) -> Any:
"""Evaluate `self.expression` using `**kwargs` as Context."""
try:
expression = NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise PropertyMappingExpressionException from exc
try:
response = expression.render(user=user, request=request, **kwargs)
if isinstance(response, Undefined):
raise PropertyMappingExpressionException("Response was 'Undefined'")
return response
except UndefinedError as exc:
raise PropertyMappingExpressionException from exc
def save(self, *args, **kwargs):
try:
NATIVE_ENVIRONMENT.from_string(self.expression)
except TemplateSyntaxError as exc:
raise ValidationError("Expression Syntax Error") from exc
return super().save(*args, **kwargs)
def __str__(self):
return f"Property Mapping {self.name}"
class Meta:
verbose_name = _("Property Mapping")
verbose_name_plural = _("Property Mappings")
| return getattr(self, "name") | conditional_block |
Ui_Offer_Home.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Administrator\Desktop\Python\ERP\Offer_Home.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import pymysql
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, QObject, Qt, pyqtSlot
class Ui_Offer_Home(object):
def setupUi(self, Offer_Home):
Offer_Home.setObjectName("Offer_Home")
Offer_Home.resize(1024, 768)
Offer_Home.setToolTip("")
self.verticalLayout = QtWidgets.QVBoxLayout(Offer_Home)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.Button_offernew = QtWidgets.QPushButton(Offer_Home)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("images/Add_16px_528841_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_offernew.setIcon(icon)
self.Button_offernew.setObjectName("Button_offernew")
self.horizontalLayout.addWidget(self.Button_offernew)
self.Button_audit = QtWidgets.QPushButton(Offer_Home)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("images/Accept_16px_528836_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_audit.setIcon(icon1)
self.Button_audit.setObjectName("Button_audit")
self.horizontalLayout.addWidget(self.Button_audit)
self.Box_group = QtWidgets.QComboBox(Offer_Home)
self.Box_group.setObjectName("Box_group")
self.horizontalLayout.addWidget(self.Box_group)
self.Box_filter = QtWidgets.QComboBox(Offer_Home)
self.Box_filter.setObjectName("Box_filter")
self.horizontalLayout.addWidget(self.Box_filter)
self.Line_search = QtWidgets.QLineEdit(Offer_Home)
self.Line_search.setText("")
self.Line_search.setObjectName("Line_search")
self.horizontalLayout.addWidget(self.Line_search)
self.verticalLayout.addLayout(self.horizontalLayout)
#连接数据库
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur = db.cursor()
cur.execute("SELECT * FROM 报价基本信息")
data = cur.fetchall() #接收全部的返回结果行
col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类
#数据的大小
row = len(data) #获得data的行数
vol = len(data[0]) #获得data的卷数.第一行的数量(列数)
#插入表格
self.Widget_catalog = QTableWidget(row,vol) #目录表
self.Widget_details = QTableWidget(row,vol) #明细表
font = QtGui.QFont('微软雅黑',9)
#设置字体、表头
self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体
self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题
#设置竖直方向表头不可见
# self.Widget_catalog.verticalHeader().setVisible(False)
self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框
#设置表格颜色
self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单
self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽
# self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑
self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中
self.verticalLayout.addWidget(self.Widget_catalog)
#构建表格插入数据
for i in range(row): #i到row-1的数量
for j in range(vol):
temp_data = data[i][j] # 临时记录,不能直接插入表格
data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data1)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catalog.resizeRowsToContents() #自适应行高,这两句放最后可以等数据写入后自动适应表格数据宽度
db.close
cur.close
#报价明细区域
# db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
conn = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'"
conn.execute(sql)
col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类
vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0])
self.Widget_details = QTableWidget(100,vol_1)
self.Widget_details.setHorizontalHeaderLabels(col_lst_1)
# self.Widget_details.verticalHeader().setVisible(False)
self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框
self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_details.setObjectName("报价明细")
self.verticalLayout.addWidget(self.Widget_details)
self.Widget_details.resizeColumnsToContents() #自适应字段宽度
db.close
conn.close
self.retranslateUi(Offer_Home)
QtCore.QMetaObject.connectSlotsByName(Offer_Home)
#测试显示报价明细
# self.Button_offernew.clicked.connect(self.querycl)
self.Widget_catalog.itemClicked.connect(self.querydt)
self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db
# self.Button_offernew.clicked.connect(self.msg)
def generateMenu(self, pos):
row_num = -1
for i in self.Widget_catalog.selectionModel().selection().indexes():
row_num = i.row()
if row_num < 2 :
menu = QMenu()
item1 = menu.addAction(u"通过")
item2 = menu.addAction(u"未通过")
action = menu.exec_(self.Widget_catalog.mapToGlobal(pos))
if action == item1:
print('你选了通过')
elif action == item2:
print('你选了未通过')
else:
return
def querycl(self, db):#查询报价目录
lsearch = self.Line_search.text() #搜索框
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
curr = db.cursor()
print('you search=> '+ lsearch)
sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'"
curr.execute(sql)
self.Widget_catalog.clearContents()
data_2 = curr.fetchall()
row_2 = len(data_2) #获得data的行数
vol_2 = len(curr.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_2): #i到row-1的数量
for j in range(vol_2):
temp_data = data_2[i][j] # 临时记录,不能直接插入表格
data2 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data2)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catalog.resizeRowsToContents() #自适应行高
def querydt(self):#查询报价明细querydt(self, item)
# print('you selected => '+ item.text())
# self.Line_search.setText(item.text()) #搜索框等于点击表格的值
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur_3 = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE '%"+bjdh+"%'" #'%"+bjdh+"%'"
cur_3.execute(sql)
self.Widget_details.clearContents()
data_3 = cur_3.fetchall()
row_3 = len(data_3) #获得data的行数
vol_3 = len(cur_3.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_3): #i到row-1的数量
for | self.Box_group.setToolTip(_translate("Offer_Home", "分组"))
self.Box_filter.setToolTip(_translate("Offer_Home", "筛选"))
self.Line_search.setToolTip(_translate("Offer_Home", "搜索"))
self.Line_search.setPlaceholderText(_translate("Offer_Home", "搜索...."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Offer_Home = QtWidgets.QWidget()
ui = Ui_Offer_Home()
ui.setupUi(Offer_Home)
Offer_Home.show()
sys.exit(app.exec_())
| j in range(vol_3):
temp_data = data_3[i][j] # 临时记录,不能直接插入表格
data3 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_details.setItem(i, j, data3)
self.Widget_details.resizeColumnsToContents() #自适应宽度
self.Widget_details.resizeRowsToContents() #自适应行高
#更新数据
def up_data(self,cur,db):
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
print('you bjdh=> '+ bjdh)
cur.execute("UPDATE 报价基本信息 SET 状态='通过' WHERE 报价单号 = '"+bjdh+"'")
db.commit()
reply = QMessageBox.information(QWidget(), "标题", "审核成功" )
print( reply )
# def msg(self):
# reply = QMessageBox.information(QWidget(), "标题", "对话框消息正文", QMessageBox.Yes | QMessageBox.No , QMessageBox.Yes )
# print( reply )
def retranslateUi(self, Offer_Home):
_translate = QtCore.QCoreApplication.translate
Offer_Home.setWindowTitle(_translate("Offer_Home", "报价首页"))
self.Button_offernew.setText(_translate("Offer_Home", "新建"))
self.Button_audit.setText(_translate("Offer_Home", "审核"))
| identifier_body |
Ui_Offer_Home.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Administrator\Desktop\Python\ERP\Offer_Home.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import pymysql
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, QObject, Qt, pyqtSlot
class Ui_Offer_Home(object):
def setupUi(self, Offer_Home):
Offer_Home.setObjectName("Offer_Home")
Offer_Home.resize(1024, 768)
Offer_Home.setToolTip("")
self.verticalLayout = QtWidgets.QVBoxLayout(Offer_Home)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.Button_offernew = QtWidgets.QPushButton(Offer_Home)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("images/Add_16px_528841_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_offernew.setIcon(icon)
self.Button_offernew.setObjectName("Button_offernew")
self.horizontalLayout.addWidget(self.Button_offernew)
self.Button_audit = QtWidgets.QPushButton(Offer_Home)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("images/Accept_16px_528836_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_audit.setIcon(icon1)
self.Button_audit.setObjectName("Button_audit")
self.horizontalLayout.addWidget(self.Button_audit)
self.Box_group = QtWidgets.QComboBox(Offer_Home)
self.Box_group.setObjectName("Box_group")
self.horizontalLayout.addWidget(self.Box_group)
self.Box_filter = QtWidgets.QComboBox(Offer_Home)
self.Box_filter.setObjectName("Box_filter")
self.horizontalLayout.addWidget(self.Box_filter)
self.Line_search = QtWidgets.QLineEdit(Offer_Home)
self.Line_search.setText("")
self.Line_search.setObjectName("Line_search")
self.horizontalLayout.addWidget(self.Line_search)
self.verticalLayout.addLayout(self.horizontalLayout)
#连接数据库
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur = db.cursor()
cur.execute("SELECT * FROM 报价基本信息")
data = cur.fetchall() #接收全部的返回结果行
col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类
#数据的大小
row = len(data) #获得data的行数
vol = len(data[0]) #获得data的卷数.第一行的数量(列数)
#插入表格
self.Widget_catalog = QTableWidget(row,vol) #目录表
self.Widget_details = QTableWidget(row,vol) #明细表
font = QtGui.QFont('微软雅黑',9)
#设置字体、表头
self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体
self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题
#设置竖直方向表头不可见
# self.Widget_catalog.verticalHeader().setVisible(False)
self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框
#设置表格颜色
self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单
self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽
# self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑
self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中
self.verticalLayout.addWidget(self.Widget_catalog)
#构建表格插入数据
for i in range(row): #i到row-1的数量
for j in range(vol):
temp_data = data[i][j] # 临时记录,不能直接插入表格
data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data1)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catalog.resizeRowsToContents() #自适应行高,这两句放最后可以等数据写入后自动适应表格数据宽度
        # db and cur must stay open here: the connection is reused below and by the button handlers.
#报价明细区域
# db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
conn = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'"
conn.execute(sql)
col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类
vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0])
self.Widget_details = QTableWidget(100,vol_1)
self.Widget_details.setHorizontalHeaderLabels(col_lst_1)
# self.Widget_details.verticalHeader().setVisible(False)
self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框
self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_details.setObjectName("报价明细")
self.verticalLayout.addWidget(self.Widget_details)
self.Widget_details.resizeColumnsToContents() #自适应字段宽度
        # Keep the connection open: cur and db are captured by the "新建" button handler (up_data).
self.retranslateUi(Offer_Home)
QtCore.QMetaObject.connectSlotsByName(Offer_Home)
#测试显示报价明细
# self.Button_offernew.clicked.connect(self.querycl)
self.Widget_catalog.itemClicked.connect(self.querydt)
self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db
# self.Button_offernew.clicked.connect(self.msg)
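    # Context menu for the catalog table: right-clicking one of the first two rows offers "通过" (pass) / "未通过" (fail).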
def generateMenu(self, pos):
row_num = -1
for i in self.Widget_catalog.selectionModel().selection().indexes():
row_num = i.row()
if row_num < 2 :
menu = QMenu()
item1 = menu.addAction(u"通过")
item2 = menu.addAction(u"未通过")
action = menu.exec_(self.Widget_catalog.mapToGlobal(pos))
if action == item1:
print('你选了通过')
elif action == item2:
print('你选了未通过')
else:
return
def querycl(self, db):#查询报价目录
lsearch = self.Line_search.text() #搜索框
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
curr = db.cursor()
print('you search=> '+ lsearch)
sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'"
curr.execute(sql)
self.Widget_catalog.clearContents()
data_2 = curr.fetchall()
row_2 = len(data_2) #获得data的行数
vol_2 = len(curr.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_2): #i到row-1的数量
for j in range(vol_2):
temp_data = data_2[i][j] # 临时记录,不能直接插入表格
data2 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data2)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catalog.resizeRowsToContents() #自适应行高
    def querydt(self):  # show the quotation detail rows for the record selected in the catalog table
# print('you selected => '+ item.text())
# self.Line_search.setText(item.text()) #搜索框等于点击表格的值
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur_3 = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE '%"+bjdh+"%'" #'%"+bjdh+"%'"
cur_3.execute(sql)
self.Widget_details.clearContents()
data_3 = cur_3.fetchall()
row_3 = len(data_3) #获得data的行数
vol_3 = len(cur_3.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_3): #i到row-1的数量
for j in range(vol_3):
temp_data = data_3[i][j] # 临时记录,不能直接插入表格
data3 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_details.setItem(i, j, data3)
self.Widget_details.resizeColumnsToContents() #自适应宽度
self.Widget_details.resizeRowsToContents() #自适应行高
#更新数据
def up_data(self,cur,db):
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
print('you bjdh=> '+ bjdh)
cur.execute("UPDATE 报价基本信息 SET 状态='通过' WHERE 报价单号 = '"+bjdh+"'")
db.commit()
reply = QMessageBox.information(QWidget(), "标题", "审核成功" )
print( reply )
# def msg(self):
# reply = QMessageBox.information(QWidget(), "标题", "对话框消息正文", QMessageBox.Yes | QMessageBox.No , QMessageBox.Yes )
# print( reply )
def retranslateUi(self, Offer_Home):
_translate = QtCore.QCoreApplication.translate
Offer_Home.setWindowTitle(_translate("Offer_Home", "报价首页"))
self.Button_offernew.setText(_translate("Offer_Home", "新建"))
self.Button_audit.setText(_translate("Offer_Home", "审核"))
self.Box_group.setToolTip(_translate("Offer_Home", "分组"))
self.Box_filter.setToolTip(_translate("Offer_Home", "筛选"))
self.Line_search.setToolTip(_translate("Offer_Home", "搜索"))
self.Line_search.setPlaceholderText(_translate("Offer_Home", "搜索...."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Offer_Home = QtWidgets.QWidget()
ui = Ui_Offer_Home()
ui.setupUi(Offer_Home)
Offer_Home.show()
sys.exit(app.exec_())
| identifier_name |
||
Ui_Offer_Home.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Administrator\Desktop\Python\ERP\Offer_Home.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import pymysql
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, QObject, Qt, pyqtSlot
class Ui_Offer_Home(object):
def setupUi(self, Offer_Home):
Offer_Home.setObjectName("Offer_Home")
Offer_Home.resize(1024, 768)
Offer_Home.setToolTip("")
self.verticalLayout = QtWidgets.QVBoxLayout(Offer_Home)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.Button_offernew = QtWidgets.QPushButton(Offer_Home)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("images/Add_16px_528841_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_offernew.setIcon(icon)
self.Button_offernew.setObjectName("Button_offernew")
self.horizontalLayout.addWidget(self.Button_offernew)
self.Button_audit = QtWidgets.QPushButton(Offer_Home)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("images/Accept_16px_528836_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_audit.setIcon(icon1)
self.Button_audit.setObjectName("Button_audit")
self.horizontalLayout.addWidget(self.Button_audit)
self.Box_group = QtWidgets.QComboBox(Offer_Home)
self.Box_group.setObjectName("Box_group")
self.horizontalLayout.addWidget(self.Box_group)
self.Box_filter = QtWidgets.QComboBox(Offer_Home)
self.Box_filter.setObjectName("Box_filter")
self.horizontalLayout.addWidget(self.Box_filter)
self.Line_search = QtWidgets.QLineEdit(Offer_Home)
self.Line_search.setText("")
self.Line_search.setObjectName("Line_search")
self.horizontalLayout.addWidget(self.Line_search)
self.verticalLayout.addLayout(self.horizontalLayout)
#连接数据库
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur = db.cursor()
cur.execute("SELECT * FROM 报价基本信息")
data = cur.fetchall() #接收全部的返回结果行
col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类
#数据的大小
row = len(data) #获得data的行数
vol = len(data[0]) #获得data的卷数.第一行的数量(列数)
#插入表格
self.Widget_catalog = QTableWidget(row,vol) #目录表
self.Widget_details = QTableWidget(row,vol) #明细表
font = QtGui.QFont('微软雅黑',9)
#设置字体、表头
self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体
self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题
#设置竖直方向表头不可见
# self.Widget_catalog.verticalHeader().setVisible(False)
self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框
#设置表格颜色
self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单
self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽
# self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑
self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中
self.verticalLayout.addWidget(self.Widget_catalog)
#构建表格插入数据
for i in range(row): #i到row-1的数量
for j in range(vol):
temp_data = data[i][j] # 临时记录,不能直接插入表格
data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data1)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catal | conn = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'"
conn.execute(sql)
col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类
vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0])
self.Widget_details = QTableWidget(100,vol_1)
self.Widget_details.setHorizontalHeaderLabels(col_lst_1)
# self.Widget_details.verticalHeader().setVisible(False)
self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框
self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_details.setObjectName("报价明细")
self.verticalLayout.addWidget(self.Widget_details)
self.Widget_details.resizeColumnsToContents() #自适应字段宽度
db.close
conn.close
self.retranslateUi(Offer_Home)
QtCore.QMetaObject.connectSlotsByName(Offer_Home)
#测试显示报价明细
# self.Button_offernew.clicked.connect(self.querycl)
self.Widget_catalog.itemClicked.connect(self.querydt)
self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db
# self.Button_offernew.clicked.connect(self.msg)
def generateMenu(self, pos):
row_num = -1
for i in self.Widget_catalog.selectionModel().selection().indexes():
row_num = i.row()
if row_num < 2 :
menu = QMenu()
item1 = menu.addAction(u"通过")
item2 = menu.addAction(u"未通过")
action = menu.exec_(self.Widget_catalog.mapToGlobal(pos))
if action == item1:
print('你选了通过')
elif action == item2:
print('你选了未通过')
else:
return
def querycl(self, db):#查询报价目录
lsearch = self.Line_search.text() #搜索框
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
curr = db.cursor()
print('you search=> '+ lsearch)
sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'"
curr.execute(sql)
self.Widget_catalog.clearContents()
data_2 = curr.fetchall()
row_2 = len(data_2) #获得data的行数
vol_2 = len(curr.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_2): #i到row-1的数量
for j in range(vol_2):
temp_data = data_2[i][j] # 临时记录,不能直接插入表格
data2 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data2)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catalog.resizeRowsToContents() #自适应行高
def querydt(self):#查询报价明细querydt(self, item)
# print('you selected => '+ item.text())
# self.Line_search.setText(item.text()) #搜索框等于点击表格的值
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur_3 = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE '%"+bjdh+"%'" #'%"+bjdh+"%'"
cur_3.execute(sql)
self.Widget_details.clearContents()
data_3 = cur_3.fetchall()
row_3 = len(data_3) #获得data的行数
vol_3 = len(cur_3.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_3): #i到row-1的数量
for j in range(vol_3):
temp_data = data_3[i][j] # 临时记录,不能直接插入表格
data3 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_details.setItem(i, j, data3)
self.Widget_details.resizeColumnsToContents() #自适应宽度
self.Widget_details.resizeRowsToContents() #自适应行高
#更新数据
def up_data(self,cur,db):
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
print('you bjdh=> '+ bjdh)
cur.execute("UPDATE 报价基本信息 SET 状态='通过' WHERE 报价单号 = '"+bjdh+"'")
db.commit()
reply = QMessageBox.information(QWidget(), "标题", "审核成功" )
print( reply )
# def msg(self):
# reply = QMessageBox.information(QWidget(), "标题", "对话框消息正文", QMessageBox.Yes | QMessageBox.No , QMessageBox.Yes )
# print( reply )
def retranslateUi(self, Offer_Home):
_translate = QtCore.QCoreApplication.translate
Offer_Home.setWindowTitle(_translate("Offer_Home", "报价首页"))
self.Button_offernew.setText(_translate("Offer_Home", "新建"))
self.Button_audit.setText(_translate("Offer_Home", "审核"))
self.Box_group.setToolTip(_translate("Offer_Home", "分组"))
self.Box_filter.setToolTip(_translate("Offer_Home", "筛选"))
self.Line_search.setToolTip(_translate("Offer_Home", "搜索"))
self.Line_search.setPlaceholderText(_translate("Offer_Home", "搜索...."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Offer_Home = QtWidgets.QWidget()
ui = Ui_Offer_Home()
ui.setupUi(Offer_Home)
Offer_Home.show()
sys.exit(app.exec_())
| og.resizeRowsToContents() #自适应行高,这两句放最后可以等数据写入后自动适应表格数据宽度
db.close
cur.close
#报价明细区域
# db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
| conditional_block |
Ui_Offer_Home.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Administrator\Desktop\Python\ERP\Offer_Home.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import pymysql
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, QObject, Qt, pyqtSlot
class Ui_Offer_Home(object):
def setupUi(self, Offer_Home):
Offer_Home.setObjectName("Offer_Home")
Offer_Home.resize(1024, 768)
Offer_Home.setToolTip("") | self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.Button_offernew = QtWidgets.QPushButton(Offer_Home)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("images/Add_16px_528841_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_offernew.setIcon(icon)
self.Button_offernew.setObjectName("Button_offernew")
self.horizontalLayout.addWidget(self.Button_offernew)
self.Button_audit = QtWidgets.QPushButton(Offer_Home)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("images/Accept_16px_528836_easyicon.net.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_audit.setIcon(icon1)
self.Button_audit.setObjectName("Button_audit")
self.horizontalLayout.addWidget(self.Button_audit)
self.Box_group = QtWidgets.QComboBox(Offer_Home)
self.Box_group.setObjectName("Box_group")
self.horizontalLayout.addWidget(self.Box_group)
self.Box_filter = QtWidgets.QComboBox(Offer_Home)
self.Box_filter.setObjectName("Box_filter")
self.horizontalLayout.addWidget(self.Box_filter)
self.Line_search = QtWidgets.QLineEdit(Offer_Home)
self.Line_search.setText("")
self.Line_search.setObjectName("Line_search")
self.horizontalLayout.addWidget(self.Line_search)
self.verticalLayout.addLayout(self.horizontalLayout)
#连接数据库
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur = db.cursor()
cur.execute("SELECT * FROM 报价基本信息")
data = cur.fetchall() #接收全部的返回结果行
col_lst = [tup[0] for tup in cur.description] #数据列字段名 tup:数组 #description:种类
#数据的大小
row = len(data) #获得data的行数
vol = len(data[0]) #获得data的卷数.第一行的数量(列数)
#插入表格
self.Widget_catalog = QTableWidget(row,vol) #目录表
self.Widget_details = QTableWidget(row,vol) #明细表
font = QtGui.QFont('微软雅黑',9)
#设置字体、表头
self.Widget_catalog.horizontalHeader().setFont(font) #设置行表头字体
self.Widget_catalog.setHorizontalHeaderLabels(col_lst) #设置标题
#设置竖直方向表头不可见
# self.Widget_catalog.verticalHeader().setVisible(False)
self.Widget_catalog.setFrameShape(QFrame.NoFrame) #设置无边框
#设置表格颜色
self.Widget_catalog.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_catalog.setContextMenuPolicy(Qt.CustomContextMenu)#允许右键产生菜单
self.Widget_catalog.customContextMenuRequested.connect(self.generateMenu)#将右键绑定到槽
# self.Widget_catalog.setEditTriggers(QAbstractItemView.NoEditTriggers)#设置表格禁止编辑
self.Widget_catalog.setSelectionBehavior(QAbstractItemView.SelectRows)#设置整行选中
self.verticalLayout.addWidget(self.Widget_catalog)
#构建表格插入数据
for i in range(row): #i到row-1的数量
for j in range(vol):
temp_data = data[i][j] # 临时记录,不能直接插入表格
data1 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data1)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catalog.resizeRowsToContents() #自适应行高,这两句放最后可以等数据写入后自动适应表格数据宽度
db.close
cur.close
#报价明细区域
# db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
conn = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE 'BJ18011516'" #'%"+bjdh+"%'"
conn.execute(sql)
col_lst_1 = [tup[0] for tup in conn.description] #数据列字段名 tup:数组 #description:种类
vol_1 = len(conn.description) #获得data的卷数.第一行的数量(列数)cur.description len(data[0])
self.Widget_details = QTableWidget(100,vol_1)
self.Widget_details.setHorizontalHeaderLabels(col_lst_1)
# self.Widget_details.verticalHeader().setVisible(False)
self.Widget_details.setFrameShape(QFrame.NoFrame) #设置无边框
self.Widget_details.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.Widget_details.horizontalHeader().setStyleSheet('QHeaderView::section{background:skyblue}')
self.Widget_details.setObjectName("报价明细")
self.verticalLayout.addWidget(self.Widget_details)
self.Widget_details.resizeColumnsToContents() #自适应字段宽度
db.close
conn.close
self.retranslateUi(Offer_Home)
QtCore.QMetaObject.connectSlotsByName(Offer_Home)
#测试显示报价明细
# self.Button_offernew.clicked.connect(self.querycl)
self.Widget_catalog.itemClicked.connect(self.querydt)
self.Button_offernew.clicked.connect(partial(self.up_data, cur, db)) #更新实现 #partialial传递db
# self.Button_offernew.clicked.connect(self.msg)
def generateMenu(self, pos):
row_num = -1
for i in self.Widget_catalog.selectionModel().selection().indexes():
row_num = i.row()
if row_num < 2 :
menu = QMenu()
item1 = menu.addAction(u"通过")
item2 = menu.addAction(u"未通过")
action = menu.exec_(self.Widget_catalog.mapToGlobal(pos))
if action == item1:
print('你选了通过')
elif action == item2:
print('你选了未通过')
else:
return
def querycl(self, db):#查询报价目录
lsearch = self.Line_search.text() #搜索框
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
curr = db.cursor()
print('you search=> '+ lsearch)
sql = "SELECT * FROM 报价基本信息 WHERE 公司名称 LIKE '%"+lsearch+"%'" #'%"+bjdh+"%'"
curr.execute(sql)
self.Widget_catalog.clearContents()
data_2 = curr.fetchall()
row_2 = len(data_2) #获得data的行数
vol_2 = len(curr.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_2): #i到row-1的数量
for j in range(vol_2):
temp_data = data_2[i][j] # 临时记录,不能直接插入表格
data2 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_catalog.setItem(i, j, data2)
self.Widget_catalog.resizeColumnsToContents() #自适应宽度
self.Widget_catalog.resizeRowsToContents() #自适应行高
def querydt(self):#查询报价明细querydt(self, item)
# print('you selected => '+ item.text())
# self.Line_search.setText(item.text()) #搜索框等于点击表格的值
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
db = pymysql.connect(host='127.0.0.1', port=3308, user='root', password='root', db='mrp',charset='utf8',)
cur_3 = db.cursor()
sql = "SELECT * FROM 报价明细 WHERE 报价单号 LIKE '%"+bjdh+"%'" #'%"+bjdh+"%'"
cur_3.execute(sql)
self.Widget_details.clearContents()
data_3 = cur_3.fetchall()
row_3 = len(data_3) #获得data的行数
vol_3 = len(cur_3.description) #获得data的列数.cur.description len(data[0])
#构建表格插入数据
for i in range(row_3): #i到row-1的数量
for j in range(vol_3):
temp_data = data_3[i][j] # 临时记录,不能直接插入表格
data3 = QTableWidgetItem(str(temp_data)) # 转换后可插入表格
self.Widget_details.setItem(i, j, data3)
self.Widget_details.resizeColumnsToContents() #自适应宽度
self.Widget_details.resizeRowsToContents() #自适应行高
#更新数据
def up_data(self,cur,db):
h = self.Widget_catalog.currentIndex().row() #找到所选行的行数h
bjdh = self.Widget_catalog.item(h, 0).text() #找到所选h行的0位报价单号
print('you bjdh=> '+ bjdh)
cur.execute("UPDATE 报价基本信息 SET 状态='通过' WHERE 报价单号 = '"+bjdh+"'")
db.commit()
reply = QMessageBox.information(QWidget(), "标题", "审核成功" )
print( reply )
# def msg(self):
# reply = QMessageBox.information(QWidget(), "标题", "对话框消息正文", QMessageBox.Yes | QMessageBox.No , QMessageBox.Yes )
# print( reply )
def retranslateUi(self, Offer_Home):
_translate = QtCore.QCoreApplication.translate
Offer_Home.setWindowTitle(_translate("Offer_Home", "报价首页"))
self.Button_offernew.setText(_translate("Offer_Home", "新建"))
self.Button_audit.setText(_translate("Offer_Home", "审核"))
self.Box_group.setToolTip(_translate("Offer_Home", "分组"))
self.Box_filter.setToolTip(_translate("Offer_Home", "筛选"))
self.Line_search.setToolTip(_translate("Offer_Home", "搜索"))
self.Line_search.setPlaceholderText(_translate("Offer_Home", "搜索...."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Offer_Home = QtWidgets.QWidget()
ui = Ui_Offer_Home()
ui.setupUi(Offer_Home)
Offer_Home.show()
sys.exit(app.exec_()) | self.verticalLayout = QtWidgets.QVBoxLayout(Offer_Home) | random_line_split |
utils.rs | pub const PLUMO_SETUP_PERSONALIZATION: &[u8] = b"PLUMOSET";
pub const ADDRESS_LENGTH: usize = 20;
pub const ADDRESS_LENGTH_IN_HEX: usize = 42;
pub const SIGNATURE_LENGTH_IN_HEX: usize = 130;
pub const DEFAULT_MAX_RETRIES: usize = 5;
pub const ONE_MB: usize = 1024 * 1024;
pub const DEFAULT_CHUNK_SIZE: u64 = 1 * (ONE_MB as u64);
pub const DEFAULT_NUM_PARALLEL_CHUNKS: usize = 50;
pub const DEFAULT_CHUNK_TIMEOUT_IN_SECONDS: u64 = 300;
pub const BEACON_HASH_LENGTH: usize = 32;
use crate::blobstore::{upload_access_key, upload_sas};
use crate::data_structs::{
Attestation, Ceremony, Parameters, PlumoSetupKeys, ProcessorData, Response,
};
use crate::error::{UtilsError, VerifyTranscriptError};
use age::{
armor::{ArmoredWriter, Format},
EncryptError, Encryptor,
};
use algebra::PairingEngine;
use anyhow::Result;
use ethers::types::{Address, Signature};
use hex::ToHex;
use phase1::{ContributionMode, Phase1Parameters, ProvingSystem};
use reqwest::header::{AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, RANGE};
use secrecy::{ExposeSecret, SecretString, SecretVec};
use serde::Serialize;
use std::{
fs::{copy, remove_file, File, OpenOptions},
io::{Read, Write},
path::Path,
str::FromStr,
};
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Phase {
Phase1,
Phase2,
}
pub fn string_to_phase(str: &str) -> Result<Phase> {
match str.to_lowercase().as_ref() {
"phase1" => Ok(Phase::Phase1),
"phase2" => Ok(Phase::Phase2),
"" => Err(UtilsError::NoPhaseError.into()),
x => Err(UtilsError::UnknownPhaseError(x.to_string()).into()),
}
}
pub fn copy_file_if_exists(file_path: &str, dest_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
copy(file_path, dest_path)?;
}
Ok(())
}
pub fn download_file(url: &str, file_path: &str) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut resp = reqwest::blocking::get(url)?.error_for_status()?;
let mut out = File::create(file_path)?;
resp.copy_to(&mut out)?;
Ok(())
}
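/// Downloads a blob of `expected_length` bytes by issuing parallel ranged GETs of
/// `DEFAULT_CHUNK_SIZE` bytes (each retried up to `DEFAULT_MAX_RETRIES` times) and
/// writing the chunks to `file_path` in order.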
pub async fn download_file_from_azure_async(
url: &str,
expected_length: u64,
file_path: &str,
) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut out = File::create(file_path)?;
let num_chunks = (expected_length + DEFAULT_CHUNK_SIZE - 1) / DEFAULT_CHUNK_SIZE;
let mut futures = vec![];
for chunk_index in 0..num_chunks {
let url = url.to_string();
futures.push(tokio::spawn(FutureRetry::new(
move || {
let url = url.clone();
async move {
let start = chunk_index * DEFAULT_CHUNK_SIZE;
let end = if chunk_index == num_chunks - 1 {
expected_length - 1
} else {
(chunk_index + 1) * DEFAULT_CHUNK_SIZE - 1
};
let client = reqwest::Client::new();
let mut resp = client
.get(&url)
.header(CONTENT_TYPE, "application/octet-stream")
.header(RANGE, format!("bytes={}-{}", start, end))
.header(CONTENT_LENGTH, 0)
.timeout(std::time::Duration::from_secs(
DEFAULT_CHUNK_TIMEOUT_IN_SECONDS,
))
.send()
.await?
.error_for_status()?;
let mut bytes = Vec::with_capacity((end - start + 1) as usize);
while let Some(chunk) = resp.chunk().await? {
bytes.write_all(&chunk)?;
}
Ok::<Vec<u8>, anyhow::Error>(bytes)
}
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)));
}
let bytes_list = futures::future::try_join_all(futures)
.await?
.into_iter()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?
.into_iter()
.map(|(v, _)| v);
for bytes in bytes_list {
out.write_all(&bytes)?;
}
Ok(())
}
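/// Downloads `url` into `file_path` with a single streaming GET, retrying the whole
/// transfer up to `DEFAULT_MAX_RETRIES` times.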
pub async fn download_file_direct_async(url: &str, file_path: &str) -> Result<()> {
let url = url.to_string();
let file_path = file_path.to_string();
FutureRetry::new(
|| async {
remove_file_if_exists(&file_path)?;
let mut resp = reqwest::get(&url).await?.error_for_status()?;
let mut out = File::create(&file_path)?;
while let Some(chunk) = resp.chunk().await? {
out.write_all(&chunk)?;
}
Ok(())
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)
.await
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?;
Ok(())
}
pub async fn upload_file_to_azure_async(file_path: &str, url: &str) -> Result<()> {
upload_sas(file_path, url).await?;
Ok(())
}
pub async fn upload_file_to_azure_with_access_key_async(
file_path: &str,
access_key: &str,
account: &str,
container: &str,
path: &str,
) -> Result<()> {
upload_access_key(file_path, access_key, account, container, path).await?;
Ok(())
}
pub async fn upload_file_direct_async(
authorization: &str,
file_path: &str,
url: &str,
) -> Result<()> {
let mut file = File::open(file_path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
let client = reqwest::Client::new();
client
.post(url)
.header(AUTHORIZATION, authorization)
.header(CONTENT_TYPE, "application/octet-stream")
.body(contents)
.send()
.await?
.error_for_status()?;
Ok(())
}
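/// Reorders a hex-encoded signature from v||r||s (2 + 64 + 64 hex chars) to r||s||v
/// by moving the leading two hex characters to the end.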
pub fn vrs_to_rsv(rsv: &str) -> String {
format!("{}{}{}", &rsv[2..66], &rsv[66..130], &rsv[..2])
}
pub fn remove_file_if_exists(file_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
remove_file(file_path)?;
}
Ok(())
}
pub async fn get_content_length(url: &str) -> Result<u64> {
let client = reqwest::Client::new();
let result = client.head(url).send().await?.error_for_status()?;
Ok(result.headers()["content-length"]
.to_str()?
.parse::<u64>()?)
}
pub async fn get_ceremony(url: &str) -> Result<Ceremony> {
let response = reqwest::get(url).await?.error_for_status()?;
let data = response.text().await?;
let ceremony: Ceremony = serde_json::from_str::<Response<Ceremony>>(&data)?.result;
Ok(ceremony)
}
use crate::transcript_data_structs::Transcript;
use blake2::{Blake2s, Digest};
use ethers::signers::{LocalWallet, Signer};
use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy};
use rand::rngs::OsRng;
use rand::RngCore;
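/// Verifies that `signature` over the JSON serialization of `data` was produced by the
/// address given in `id` (a 0x-prefixed 20-byte hex string).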
pub fn verify_signed_data<T: Serialize>(data: &T, signature: &str, id: &str) -> Result<()> {
let signature = Signature::from_str(&signature[2..])?;
let serialized_data = serde_json::to_string(data)?;
let deserialized_id = hex::decode(&id[2..])?;
if deserialized_id.len() != ADDRESS_LENGTH {
return Err(VerifyTranscriptError::IDWrongLength(deserialized_id.len()).into());
}
let mut address = [0u8; ADDRESS_LENGTH];
address.copy_from_slice(&deserialized_id);
let address = Address::from(address);
signature.verify(serialized_data, address)?;
Ok(())
}
pub fn read_hash_from_file(file_name: &str) -> Result<String> {
let mut hash = vec![];
File::open(file_name)
.expect("Should have opened hash file.")
.read_to_end(&mut hash)
.expect("Should have read hash file.");
let hash_hex = hex::encode(&hash);
Ok(hash_hex)
}
pub fn proving_system_from_str(proving_system_str: &str) -> Result<ProvingSystem> {
let proving_system = match proving_system_str {
"groth16" => ProvingSystem::Groth16,
"marlin" => ProvingSystem::Marlin,
_ => {
return Err(VerifyTranscriptError::UnsupportedProvingSystemError(
proving_system_str.to_string(),
)
.into());
}
};
Ok(proving_system)
}
pub fn check_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongChallengeHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_response_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongResponseHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_new_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(
VerifyTranscriptError::WrongNewChallengeHash(a.to_string(), b.to_string()).into(),
);
}
Ok(())
}
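/// Builds the `Authorization` header value: signs the lowercased "{method} /{path}"
/// message with the participant's key and returns "Celo 0x<address>:0x<signature>".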
pub fn get_authorization_value(
private_key: &LocalWallet,
method: &str,
path: &str,
) -> Result<String> {
let address = private_key.address().encode_hex::<String>();
let message = format!("{} /{}", method.to_lowercase(), path.to_lowercase());
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string());
Ok(authorization)
}
pub fn create_parameters_for_chunk<E: PairingEngine>(
ceremony_parameters: &Parameters,
chunk_index: usize,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_chunk(
ContributionMode::Chunked,
chunk_index,
ceremony_parameters.chunk_size,
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn create_full_parameters<E: PairingEngine>(
ceremony_parameters: &Parameters,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_full(
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> {
let message = serde_json::to_string(value)?;
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
Ok(format!("0x{}", signature.to_string()))
}
pub fn address_to_string(address: &Address) -> String {
format!("0x{}", address.encode_hex::<String>())
}
#[derive(Debug, Clone, Copy)]
pub enum UploadMode {
Auto,
Azure,
Direct,
}
pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> {
match upload_mode {
"auto" => Ok(UploadMode::Auto),
"azure" => Ok(UploadMode::Azure),
"direct" => Ok(UploadMode::Direct),
_ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()),
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ParticipationMode {
Contribute,
Verify,
}
pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> {
match participation_mode {
"contribute" => Ok(ParticipationMode::Contribute),
"verify" => Ok(ParticipationMode::Verify),
_ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()),
}
}
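/// Decrypts a hex-encoded, passphrase-protected age payload and returns the plaintext bytes.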
fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> {
let decoded = SecretVec::new(hex::decode(encrypted)?);
let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?;
let mut output = vec![];
if let age::Decryptor::Passphrase(decryptor) = decryptor {
let mut reader = decryptor.decrypt(passphrase, None)?;
reader.read_to_end(&mut output)?;
} else {
return Err(UtilsError::UnsupportedDecryptorError.into());
}
Ok(output)
}
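/// Encrypts `secret` with the given age encryptor and returns the ciphertext as a hex string.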
pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> {
let mut encrypted_output = vec![];
let mut writer = encryptor
.wrap_output(ArmoredWriter::wrap_output(
&mut encrypted_output,
Format::Binary,
)?)
.map_err(|e| match e {
EncryptError::Io(e) => e,
})?;
std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?;
writer.finish()?;
let encrypted_secret = hex::encode(&encrypted_output);
Ok(encrypted_secret.to_string())
}
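/// Loads `PlumoSetupKeys` from `keys_file`, prompts for the passphrase, decrypts the seed
/// and private key, optionally collects fresh extra entropy (persisted back to the file),
/// and mixes any stored extra entropy into the seed with Blake2s.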
pub fn read_keys(
keys_file: &str,
should_use_stdin: bool,
should_collect_extra_entropy: bool,
) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> {
let mut contents = String::new();
{
std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?;
}
let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?;
let description = "Enter your Plumo setup passphrase";
let passphrase = if should_use_stdin {
println!("{}:", description);
SecretString::new(rpassword::read_password()?)
} else {
age::cli_common::read_secret(description, "Passphrase", None)
.map_err(|_| UtilsError::CouldNotReadPassphraseError)?
};
let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?);
let plumo_private_key_from_file =
SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?);
if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin {
let description = "Enter some extra entropy (this should only be done at the first time you run the contribute binary!)";
let entered_entropy = age::cli_common::read_secret(description, "Entropy", None)
.map_err(|_| UtilsError::CouldNotReadEntropyError)?;
let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone());
let mut rng = OsRng;
let mut extra_entropy = vec![0u8; 64];
rng.fill_bytes(&mut extra_entropy[..]);
let extra_entropy = SecretVec::new(extra_entropy);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(extra_entropy.expose_secret());
hasher.update(entered_entropy.expose_secret());
let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec());
let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?;
keys.encrypted_extra_entropy = Some(encrypted_extra_entropy);
let mut file = OpenOptions::new().write(true).open(&keys_file)?;
file.write_all(&serde_json::to_vec(&keys)?)?;
file.sync_all()?;
}
let plumo_seed = match keys.encrypted_extra_entropy {
None => plumo_seed_from_file,
Some(encrypted_entropy) => {
let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(plumo_seed_from_file.expose_secret());
hasher.update(entropy.expose_secret());
SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec())
}
};
Ok((plumo_seed, plumo_private_key_from_file, keys.attestation))
}
pub fn collect_processor_data() -> Result<Vec<ProcessorData>> {
cfg_if::cfg_if! {
if #[cfg(not(target_arch = "aarch64"))] {
use sysinfo::{ProcessorExt, System, SystemExt};
let s = System::new();
let processors = s
.get_processors()
.iter()
.map(|p| ProcessorData {
name: p.get_name().to_string(),
brand: p.get_brand().to_string(),
frequency: p.get_frequency().to_string(),
})
.collect();
Ok(processors)
} else {
Ok(vec![])
}
}
}
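/// Retry policy used with `FutureRetry`: logs each failure, waits 5 seconds between
/// attempts, and forwards the error once `max_attempts` is reached.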
pub struct MaxRetriesHandler {
max_attempts: usize,
}
impl MaxRetriesHandler {
pub fn new(max_attempts: usize) -> Self {
MaxRetriesHandler { max_attempts }
}
}
impl ErrorHandler<anyhow::Error> for MaxRetriesHandler {
type OutError = anyhow::Error;
fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> {
warn!(
"Failed: {}, retry {}/{}",
e.to_string(),
attempt,
self.max_attempts,
);
if attempt >= self.max_attempts {
RetryPolicy::ForwardError(e)
} else {
RetryPolicy::WaitRetry(
chrono::Duration::seconds(5)
.to_std()
.expect("Should have converted to standard duration"),
)
}
}
}
pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.accumulator_size as u64
}
pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.contribution_size as u64
}
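/// Loads the ceremony transcript from the local "transcript" file, creating an empty
/// transcript first if the file does not exist.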
pub fn load_transcript() -> Result<Transcript> {
let filename = "transcript";
if !std::path::Path::new(filename).exists() {
let mut file = File::create(filename)?;
file.write_all(
serde_json::to_string_pretty(&Transcript {
rounds: vec![],
beacon_hash: None,
final_hash: None,
})?
.as_bytes(),
)?;
}
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?;
Ok(transcript)
}
pub fn save_transcript(transcript: &Transcript) -> Result<()> {
let filename = "transcript";
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn backup_transcript(transcript: &Transcript) -> Result<()> {
let filename = format!("transcript_{}", chrono::Utc::now().timestamp_nanos());
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn format_attestation(attestation_message: &str, address: &str, signature: &str) -> String {
format!("{} {} {}", attestation_message, address, signature)
}
pub fn extract_signature_from_attestation(attestation: &str) -> Result<(String, String, String)> {
let attestation = attestation.to_string();
let attestation_parts = attestation.split(" ").collect::<Vec<_>>();
if attestation_parts.len() < 3 {
return Err(UtilsError::AttestationTooShort(attestation_parts.len()).into());
}
Ok((
attestation_parts[0..=attestation_parts.len() - 3].join(" "),
attestation_parts[attestation_parts.len() - 2].to_string(),
attestation_parts[attestation_parts.len() - 1].to_string(),
))
}
pub fn write_attestation_to_file(attestation: &Attestation, path: &str) -> Result<()> |
pub fn trim_newline(s: &mut String) {
if s.ends_with('\n') {
s.pop();
if s.ends_with('\r') {
s.pop();
}
}
}
pub fn compute_hash_from_file(fname: &str) -> Result<String> {
let challenge_contents = std::fs::read(fname)?;
Ok(hex::encode(setup_utils::calculate_hash(
&challenge_contents,
)))
}
| {
File::create(path)?.write_all(
format_attestation(
&attestation.id,
&attestation.address,
&attestation.signature,
)
.as_bytes(),
)?;
Ok(())
} | identifier_body |
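// Illustrative sketch (not part of the original file): one way the helpers above might be
// combined to authorize and upload a locally produced response file. The path and URL are
// made-up placeholders, and the snippet assumes a `LocalWallet` for the participant plus the
// `get_authorization_value` / `upload_file_direct_async` functions defined in this module.
//
//   async fn upload_contribution_sketch(wallet: &LocalWallet) -> anyhow::Result<()> {
//       // Sign "post /chunks/0/contribution" and build the "Celo 0x<address>:0x<signature>" header.
//       let auth = get_authorization_value(wallet, "POST", "chunks/0/contribution")?;
//       // Stream the response file to the coordinator endpoint.
//       upload_file_direct_async(&auth, "./response", "https://example.invalid/upload").await?;
//       Ok(())
//   }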
utils.rs | pub const PLUMO_SETUP_PERSONALIZATION: &[u8] = b"PLUMOSET";
pub const ADDRESS_LENGTH: usize = 20;
pub const ADDRESS_LENGTH_IN_HEX: usize = 42;
pub const SIGNATURE_LENGTH_IN_HEX: usize = 130;
pub const DEFAULT_MAX_RETRIES: usize = 5;
pub const ONE_MB: usize = 1024 * 1024;
pub const DEFAULT_CHUNK_SIZE: u64 = 1 * (ONE_MB as u64);
pub const DEFAULT_NUM_PARALLEL_CHUNKS: usize = 50;
pub const DEFAULT_CHUNK_TIMEOUT_IN_SECONDS: u64 = 300;
pub const BEACON_HASH_LENGTH: usize = 32;
use crate::blobstore::{upload_access_key, upload_sas};
use crate::data_structs::{
Attestation, Ceremony, Parameters, PlumoSetupKeys, ProcessorData, Response,
};
use crate::error::{UtilsError, VerifyTranscriptError};
use age::{
armor::{ArmoredWriter, Format},
EncryptError, Encryptor,
};
use algebra::PairingEngine;
use anyhow::Result;
use ethers::types::{Address, Signature};
use hex::ToHex;
use phase1::{ContributionMode, Phase1Parameters, ProvingSystem};
use reqwest::header::{AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, RANGE};
use secrecy::{ExposeSecret, SecretString, SecretVec};
use serde::Serialize;
use std::{
fs::{copy, remove_file, File, OpenOptions},
io::{Read, Write},
path::Path,
str::FromStr,
};
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Phase {
Phase1,
Phase2,
}
pub fn string_to_phase(str: &str) -> Result<Phase> {
match str.to_lowercase().as_ref() {
"phase1" => Ok(Phase::Phase1),
"phase2" => Ok(Phase::Phase2),
"" => Err(UtilsError::NoPhaseError.into()),
x => Err(UtilsError::UnknownPhaseError(x.to_string()).into()),
}
}
pub fn copy_file_if_exists(file_path: &str, dest_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
copy(file_path, dest_path)?;
}
Ok(())
}
pub fn download_file(url: &str, file_path: &str) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut resp = reqwest::blocking::get(url)?.error_for_status()?;
let mut out = File::create(file_path)?;
resp.copy_to(&mut out)?;
Ok(())
}
pub async fn download_file_from_azure_async(
url: &str,
expected_length: u64,
file_path: &str,
) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut out = File::create(file_path)?;
let num_chunks = (expected_length + DEFAULT_CHUNK_SIZE - 1) / DEFAULT_CHUNK_SIZE;
let mut futures = vec![];
for chunk_index in 0..num_chunks {
let url = url.to_string();
futures.push(tokio::spawn(FutureRetry::new(
move || {
let url = url.clone();
async move {
let start = chunk_index * DEFAULT_CHUNK_SIZE;
let end = if chunk_index == num_chunks - 1 {
expected_length - 1
} else {
(chunk_index + 1) * DEFAULT_CHUNK_SIZE - 1
};
let client = reqwest::Client::new();
let mut resp = client
.get(&url)
.header(CONTENT_TYPE, "application/octet-stream")
.header(RANGE, format!("bytes={}-{}", start, end))
.header(CONTENT_LENGTH, 0)
.timeout(std::time::Duration::from_secs(
DEFAULT_CHUNK_TIMEOUT_IN_SECONDS,
))
.send()
.await?
.error_for_status()?;
let mut bytes = Vec::with_capacity((end - start + 1) as usize);
while let Some(chunk) = resp.chunk().await? {
bytes.write_all(&chunk)?;
}
Ok::<Vec<u8>, anyhow::Error>(bytes)
}
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)));
}
let bytes_list = futures::future::try_join_all(futures)
.await?
.into_iter()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?
.into_iter()
.map(|(v, _)| v);
for bytes in bytes_list {
out.write_all(&bytes)?;
}
Ok(())
}
pub async fn download_file_direct_async(url: &str, file_path: &str) -> Result<()> {
let url = url.to_string();
let file_path = file_path.to_string();
FutureRetry::new(
|| async {
remove_file_if_exists(&file_path)?;
let mut resp = reqwest::get(&url).await?.error_for_status()?;
let mut out = File::create(&file_path)?;
while let Some(chunk) = resp.chunk().await? {
out.write_all(&chunk)?;
}
Ok(())
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)
.await
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?;
Ok(())
}
pub async fn upload_file_to_azure_async(file_path: &str, url: &str) -> Result<()> {
upload_sas(file_path, url).await?;
Ok(())
}
pub async fn upload_file_to_azure_with_access_key_async(
file_path: &str,
access_key: &str,
account: &str,
container: &str,
path: &str,
) -> Result<()> {
upload_access_key(file_path, access_key, account, container, path).await?;
Ok(())
}
pub async fn upload_file_direct_async(
authorization: &str,
file_path: &str,
url: &str,
) -> Result<()> {
let mut file = File::open(file_path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
let client = reqwest::Client::new();
client
.post(url)
.header(AUTHORIZATION, authorization)
.header(CONTENT_TYPE, "application/octet-stream")
.body(contents)
.send()
.await?
.error_for_status()?;
Ok(())
}
pub fn vrs_to_rsv(rsv: &str) -> String {
format!("{}{}{}", &rsv[2..66], &rsv[66..130], &rsv[..2])
}
pub fn remove_file_if_exists(file_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
remove_file(file_path)?;
}
Ok(())
}
pub async fn get_content_length(url: &str) -> Result<u64> {
let client = reqwest::Client::new();
let result = client.head(url).send().await?.error_for_status()?;
Ok(result.headers()["content-length"] | .parse::<u64>()?)
}
pub async fn get_ceremony(url: &str) -> Result<Ceremony> {
let response = reqwest::get(url).await?.error_for_status()?;
let data = response.text().await?;
let ceremony: Ceremony = serde_json::from_str::<Response<Ceremony>>(&data)?.result;
Ok(ceremony)
}
use crate::transcript_data_structs::Transcript;
use blake2::{Blake2s, Digest};
use ethers::signers::{LocalWallet, Signer};
use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy};
use rand::rngs::OsRng;
use rand::RngCore;
pub fn verify_signed_data<T: Serialize>(data: &T, signature: &str, id: &str) -> Result<()> {
let signature = Signature::from_str(&signature[2..])?;
let serialized_data = serde_json::to_string(data)?;
let deserialized_id = hex::decode(&id[2..])?;
if deserialized_id.len() != ADDRESS_LENGTH {
return Err(VerifyTranscriptError::IDWrongLength(deserialized_id.len()).into());
}
let mut address = [0u8; ADDRESS_LENGTH];
address.copy_from_slice(&deserialized_id);
let address = Address::from(address);
signature.verify(serialized_data, address)?;
Ok(())
}
pub fn read_hash_from_file(file_name: &str) -> Result<String> {
let mut hash = vec![];
File::open(file_name)
.expect("Should have opened hash file.")
.read_to_end(&mut hash)
.expect("Should have read hash file.");
let hash_hex = hex::encode(&hash);
Ok(hash_hex)
}
pub fn proving_system_from_str(proving_system_str: &str) -> Result<ProvingSystem> {
let proving_system = match proving_system_str {
"groth16" => ProvingSystem::Groth16,
"marlin" => ProvingSystem::Marlin,
_ => {
return Err(VerifyTranscriptError::UnsupportedProvingSystemError(
proving_system_str.to_string(),
)
.into());
}
};
Ok(proving_system)
}
pub fn check_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongChallengeHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_response_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongResponseHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_new_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(
VerifyTranscriptError::WrongNewChallengeHash(a.to_string(), b.to_string()).into(),
);
}
Ok(())
}
pub fn get_authorization_value(
private_key: &LocalWallet,
method: &str,
path: &str,
) -> Result<String> {
let address = private_key.address().encode_hex::<String>();
let message = format!("{} /{}", method.to_lowercase(), path.to_lowercase());
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string());
Ok(authorization)
}
pub fn create_parameters_for_chunk<E: PairingEngine>(
ceremony_parameters: &Parameters,
chunk_index: usize,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_chunk(
ContributionMode::Chunked,
chunk_index,
ceremony_parameters.chunk_size,
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn create_full_parameters<E: PairingEngine>(
ceremony_parameters: &Parameters,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_full(
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> {
let message = serde_json::to_string(value)?;
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
Ok(format!("0x{}", signature.to_string()))
}
pub fn address_to_string(address: &Address) -> String {
format!("0x{}", address.encode_hex::<String>())
}
#[derive(Debug, Clone, Copy)]
pub enum UploadMode {
Auto,
Azure,
Direct,
}
pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> {
match upload_mode {
"auto" => Ok(UploadMode::Auto),
"azure" => Ok(UploadMode::Azure),
"direct" => Ok(UploadMode::Direct),
_ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()),
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ParticipationMode {
Contribute,
Verify,
}
pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> {
match participation_mode {
"contribute" => Ok(ParticipationMode::Contribute),
"verify" => Ok(ParticipationMode::Verify),
_ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()),
}
}
fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> {
let decoded = SecretVec::new(hex::decode(encrypted)?);
let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?;
let mut output = vec![];
if let age::Decryptor::Passphrase(decryptor) = decryptor {
let mut reader = decryptor.decrypt(passphrase, None)?;
reader.read_to_end(&mut output)?;
} else {
return Err(UtilsError::UnsupportedDecryptorError.into());
}
Ok(output)
}
pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> {
let mut encrypted_output = vec![];
let mut writer = encryptor
.wrap_output(ArmoredWriter::wrap_output(
&mut encrypted_output,
Format::Binary,
)?)
.map_err(|e| match e {
EncryptError::Io(e) => e,
})?;
std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?;
writer.finish()?;
let encrypted_secret = hex::encode(&encrypted_output);
Ok(encrypted_secret.to_string())
}
pub fn read_keys(
keys_file: &str,
should_use_stdin: bool,
should_collect_extra_entropy: bool,
) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> {
let mut contents = String::new();
{
std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?;
}
let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?;
let description = "Enter your Plumo setup passphrase";
let passphrase = if should_use_stdin {
println!("{}:", description);
SecretString::new(rpassword::read_password()?)
} else {
age::cli_common::read_secret(description, "Passphrase", None)
.map_err(|_| UtilsError::CouldNotReadPassphraseError)?
};
let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?);
let plumo_private_key_from_file =
SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?);
if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin {
let description = "Enter some extra entropy (this should only be done at the first time you run the contribute binary!)";
let entered_entropy = age::cli_common::read_secret(description, "Entropy", None)
.map_err(|_| UtilsError::CouldNotReadEntropyError)?;
let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone());
let mut rng = OsRng;
let mut extra_entropy = vec![0u8; 64];
rng.fill_bytes(&mut extra_entropy[..]);
let extra_entropy = SecretVec::new(extra_entropy);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(extra_entropy.expose_secret());
hasher.update(entered_entropy.expose_secret());
let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec());
let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?;
keys.encrypted_extra_entropy = Some(encrypted_extra_entropy);
// Truncate on rewrite so a shorter serialization cannot leave stale bytes behind.
let mut file = OpenOptions::new().write(true).truncate(true).open(&keys_file)?;
file.write_all(&serde_json::to_vec(&keys)?)?;
file.sync_all()?;
}
let plumo_seed = match keys.encrypted_extra_entropy {
None => plumo_seed_from_file,
Some(encrypted_entropy) => {
let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(plumo_seed_from_file.expose_secret());
hasher.update(entropy.expose_secret());
SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec())
}
};
Ok((plumo_seed, plumo_private_key_from_file, keys.attestation))
}
pub fn collect_processor_data() -> Result<Vec<ProcessorData>> {
cfg_if::cfg_if! {
if #[cfg(not(target_arch = "aarch64"))] {
use sysinfo::{ProcessorExt, System, SystemExt};
let s = System::new();
let processors = s
.get_processors()
.iter()
.map(|p| ProcessorData {
name: p.get_name().to_string(),
brand: p.get_brand().to_string(),
frequency: p.get_frequency().to_string(),
})
.collect();
Ok(processors)
} else {
Ok(vec![])
}
}
}
pub struct MaxRetriesHandler {
max_attempts: usize,
}
impl MaxRetriesHandler {
pub fn new(max_attempts: usize) -> Self {
MaxRetriesHandler { max_attempts }
}
}
impl ErrorHandler<anyhow::Error> for MaxRetriesHandler {
type OutError = anyhow::Error;
fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> {
warn!(
"Failed: {}, retry {}/{}",
e.to_string(),
attempt,
self.max_attempts,
);
if attempt >= self.max_attempts {
RetryPolicy::ForwardError(e)
} else {
RetryPolicy::WaitRetry(
chrono::Duration::seconds(5)
.to_std()
.expect("Should have converted to standard duration"),
)
}
}
}
pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.accumulator_size as u64
}
pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.contribution_size as u64
}
pub fn load_transcript() -> Result<Transcript> {
let filename = "transcript";
if !std::path::Path::new(filename).exists() {
let mut file = File::create(filename)?;
file.write_all(
serde_json::to_string_pretty(&Transcript {
rounds: vec![],
beacon_hash: None,
final_hash: None,
})?
.as_bytes(),
)?;
}
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?;
Ok(transcript)
}
pub fn save_transcript(transcript: &Transcript) -> Result<()> {
let filename = "transcript";
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn backup_transcript(transcript: &Transcript) -> Result<()> {
let filename = format!("transcript_{}", chrono::Utc::now().timestamp_nanos());
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn format_attestation(attestation_message: &str, address: &str, signature: &str) -> String {
format!("{} {} {}", attestation_message, address, signature)
}
pub fn extract_signature_from_attestation(attestation: &str) -> Result<(String, String, String)> {
let attestation = attestation.to_string();
let attestation_parts = attestation.split(" ").collect::<Vec<_>>();
if attestation_parts.len() < 3 {
return Err(UtilsError::AttestationTooShort(attestation_parts.len()).into());
}
Ok((
attestation_parts[0..=attestation_parts.len() - 3].join(" "),
attestation_parts[attestation_parts.len() - 2].to_string(),
attestation_parts[attestation_parts.len() - 1].to_string(),
))
}
pub fn write_attestation_to_file(attestation: &Attestation, path: &str) -> Result<()> {
File::create(path)?.write_all(
format_attestation(
&attestation.id,
&attestation.address,
&attestation.signature,
)
.as_bytes(),
)?;
Ok(())
}
pub fn trim_newline(s: &mut String) {
if s.ends_with('\n') {
s.pop();
if s.ends_with('\r') {
s.pop();
}
}
}
pub fn compute_hash_from_file(fname: &str) -> Result<String> {
let challenge_contents = std::fs::read(fname)?;
Ok(hex::encode(setup_utils::calculate_hash(
&challenge_contents,
)))
}
// utils.rs
pub const PLUMO_SETUP_PERSONALIZATION: &[u8] = b"PLUMOSET";
pub const ADDRESS_LENGTH: usize = 20;
pub const ADDRESS_LENGTH_IN_HEX: usize = 42;
pub const SIGNATURE_LENGTH_IN_HEX: usize = 130;
pub const DEFAULT_MAX_RETRIES: usize = 5;
pub const ONE_MB: usize = 1024 * 1024;
pub const DEFAULT_CHUNK_SIZE: u64 = 1 * (ONE_MB as u64);
pub const DEFAULT_NUM_PARALLEL_CHUNKS: usize = 50;
pub const DEFAULT_CHUNK_TIMEOUT_IN_SECONDS: u64 = 300;
pub const BEACON_HASH_LENGTH: usize = 32;
use crate::blobstore::{upload_access_key, upload_sas};
use crate::data_structs::{
Attestation, Ceremony, Parameters, PlumoSetupKeys, ProcessorData, Response,
};
use crate::error::{UtilsError, VerifyTranscriptError};
use age::{
armor::{ArmoredWriter, Format},
EncryptError, Encryptor,
};
use algebra::PairingEngine;
use anyhow::Result;
use ethers::types::{Address, Signature};
use hex::ToHex;
use phase1::{ContributionMode, Phase1Parameters, ProvingSystem};
use reqwest::header::{AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, RANGE};
use secrecy::{ExposeSecret, SecretString, SecretVec};
use serde::Serialize;
use std::{
fs::{copy, remove_file, File, OpenOptions},
io::{Read, Write},
path::Path,
str::FromStr,
};
use tracing::warn;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Phase {
Phase1,
Phase2,
}
pub fn string_to_phase(str: &str) -> Result<Phase> {
match str.to_lowercase().as_ref() {
"phase1" => Ok(Phase::Phase1),
"phase2" => Ok(Phase::Phase2),
"" => Err(UtilsError::NoPhaseError.into()),
x => Err(UtilsError::UnknownPhaseError(x.to_string()).into()),
}
}
pub fn copy_file_if_exists(file_path: &str, dest_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
copy(file_path, dest_path)?;
}
Ok(())
}
pub fn download_file(url: &str, file_path: &str) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut resp = reqwest::blocking::get(url)?.error_for_status()?;
let mut out = File::create(file_path)?;
resp.copy_to(&mut out)?;
Ok(())
}
pub async fn download_file_from_azure_async(
url: &str,
expected_length: u64,
file_path: &str,
) -> Result<()> {
remove_file_if_exists(file_path)?;
let mut out = File::create(file_path)?;
let num_chunks = (expected_length + DEFAULT_CHUNK_SIZE - 1) / DEFAULT_CHUNK_SIZE;
let mut futures = vec![];
for chunk_index in 0..num_chunks {
let url = url.to_string();
futures.push(tokio::spawn(FutureRetry::new(
move || {
let url = url.clone();
async move {
let start = chunk_index * DEFAULT_CHUNK_SIZE;
let end = if chunk_index == num_chunks - 1 {
expected_length - 1
} else {
(chunk_index + 1) * DEFAULT_CHUNK_SIZE - 1
};
let client = reqwest::Client::new();
let mut resp = client
.get(&url)
.header(CONTENT_TYPE, "application/octet-stream")
.header(RANGE, format!("bytes={}-{}", start, end))
.header(CONTENT_LENGTH, 0)
.timeout(std::time::Duration::from_secs(
DEFAULT_CHUNK_TIMEOUT_IN_SECONDS,
))
.send()
.await?
.error_for_status()?;
let mut bytes = Vec::with_capacity((end - start + 1) as usize);
while let Some(chunk) = resp.chunk().await? {
bytes.write_all(&chunk)?;
}
Ok::<Vec<u8>, anyhow::Error>(bytes)
}
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)));
}
let bytes_list = futures::future::try_join_all(futures)
.await?
.into_iter()
.collect::<Result<Vec<_>, _>>()
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?
.into_iter()
.map(|(v, _)| v);
for bytes in bytes_list {
out.write_all(&bytes)?;
}
Ok(())
}
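// Sketch of how the chunked download above is typically driven (illustrative,
// not part of the original file; the URL is a placeholder):
//
//   let len = get_content_length("https://example.blob.core.windows.net/challenge").await?;
//   download_file_from_azure_async("https://example.blob.core.windows.net/challenge", len, "challenge").await?;
//
// Each 1 MiB byte range is fetched in its own task with a ranged GET, retried
// independently through MaxRetriesHandler, and the chunks are written back to
// the output file in order.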
pub async fn download_file_direct_async(url: &str, file_path: &str) -> Result<()> {
let url = url.to_string();
let file_path = file_path.to_string();
FutureRetry::new(
|| async {
remove_file_if_exists(&file_path)?;
let mut resp = reqwest::get(&url).await?.error_for_status()?;
let mut out = File::create(&file_path)?;
while let Some(chunk) = resp.chunk().await? {
out.write_all(&chunk)?;
}
Ok(())
},
MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
)
.await
.map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?;
Ok(())
}
pub async fn upload_file_to_azure_async(file_path: &str, url: &str) -> Result<()> {
upload_sas(file_path, url).await?;
Ok(())
}
pub async fn upload_file_to_azure_with_access_key_async(
file_path: &str,
access_key: &str,
account: &str,
container: &str,
path: &str,
) -> Result<()> {
upload_access_key(file_path, access_key, account, container, path).await?;
Ok(())
}
pub async fn upload_file_direct_async(
authorization: &str,
file_path: &str,
url: &str,
) -> Result<()> {
let mut file = File::open(file_path)?;
let mut contents = Vec::new();
file.read_to_end(&mut contents)?;
let client = reqwest::Client::new();
client
.post(url)
.header(AUTHORIZATION, authorization)
.header(CONTENT_TYPE, "application/octet-stream")
.body(contents)
.send()
.await?
.error_for_status()?;
Ok(())
}
pub fn vrs_to_rsv(vrs: &str) -> String {
// Move the 2-hex-digit recovery byte `v` from the front to the back: v || r || s -> r || s || v.
format!("{}{}{}", &vrs[2..66], &vrs[66..130], &vrs[..2])
}
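// Example of the reordering above (illustrative values only): for a 130-hex-char
// signature "1b" + r (64 chars) + s (64 chars), the result is r + s + "1b".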
pub fn remove_file_if_exists(file_path: &str) -> Result<()> {
if Path::new(file_path).exists() {
remove_file(file_path)?;
}
Ok(())
}
pub async fn get_content_length(url: &str) -> Result<u64> {
let client = reqwest::Client::new();
let result = client.head(url).send().await?.error_for_status()?;
Ok(result.headers()["content-length"]
.to_str()?
.parse::<u64>()?)
}
pub async fn get_ceremony(url: &str) -> Result<Ceremony> {
let response = reqwest::get(url).await?.error_for_status()?;
let data = response.text().await?;
let ceremony: Ceremony = serde_json::from_str::<Response<Ceremony>>(&data)?.result;
Ok(ceremony)
}
use crate::transcript_data_structs::Transcript;
use blake2::{Blake2s, Digest};
use ethers::signers::{LocalWallet, Signer};
use futures_retry::{ErrorHandler, FutureRetry, RetryPolicy};
use rand::rngs::OsRng;
use rand::RngCore;
pub fn verify_signed_data<T: Serialize>(data: &T, signature: &str, id: &str) -> Result<()> {
let signature = Signature::from_str(&signature[2..])?;
let serialized_data = serde_json::to_string(data)?;
let deserialized_id = hex::decode(&id[2..])?;
if deserialized_id.len() != ADDRESS_LENGTH {
return Err(VerifyTranscriptError::IDWrongLength(deserialized_id.len()).into());
}
let mut address = [0u8; ADDRESS_LENGTH];
address.copy_from_slice(&deserialized_id);
let address = Address::from(address);
signature.verify(serialized_data, address)?;
Ok(())
}
pub fn read_hash_from_file(file_name: &str) -> Result<String> {
let mut hash = vec![];
File::open(file_name)
.expect("Should have opened hash file.")
.read_to_end(&mut hash)
.expect("Should have read hash file.");
let hash_hex = hex::encode(&hash);
Ok(hash_hex)
}
pub fn proving_system_from_str(proving_system_str: &str) -> Result<ProvingSystem> {
let proving_system = match proving_system_str {
"groth16" => ProvingSystem::Groth16,
"marlin" => ProvingSystem::Marlin,
_ => {
return Err(VerifyTranscriptError::UnsupportedProvingSystemError(
proving_system_str.to_string(),
)
.into());
}
};
Ok(proving_system)
}
pub fn check_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongChallengeHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_response_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(VerifyTranscriptError::WrongResponseHash(a.to_string(), b.to_string()).into());
}
Ok(())
}
pub fn check_new_challenge_hashes_same(a: &str, b: &str) -> Result<()> {
if a != b {
return Err(
VerifyTranscriptError::WrongNewChallengeHash(a.to_string(), b.to_string()).into(),
);
}
Ok(())
}
pub fn get_authorization_value(
private_key: &LocalWallet,
method: &str,
path: &str,
) -> Result<String> {
let address = private_key.address().encode_hex::<String>();
let message = format!("{} /{}", method.to_lowercase(), path.to_lowercase());
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
let authorization = format!("Celo 0x{}:0x{}", address, signature.to_string());
Ok(authorization)
}
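// For example (illustrative values), signing a GET of "chunks/1/contribution"
// with a wallet whose address is 0xabc... produces a header value of the form
// "Celo 0xabc...:0x<hex signature>", where the signed message itself is
// "get /chunks/1/contribution".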
pub fn create_parameters_for_chunk<E: PairingEngine>(
ceremony_parameters: &Parameters,
chunk_index: usize,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_chunk(
ContributionMode::Chunked,
chunk_index,
ceremony_parameters.chunk_size,
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn create_full_parameters<E: PairingEngine>(
ceremony_parameters: &Parameters,
) -> Result<Phase1Parameters<E>> {
let proving_system = proving_system_from_str(ceremony_parameters.proving_system.as_str())?;
let parameters = Phase1Parameters::<E>::new_full(
proving_system,
ceremony_parameters.power,
ceremony_parameters.batch_size,
);
Ok(parameters)
}
pub fn sign_json(private_key: &LocalWallet, value: &serde_json::Value) -> Result<String> {
let message = serde_json::to_string(value)?;
let signature: Signature = futures::executor::block_on(private_key.sign_message(message))?;
Ok(format!("0x{}", signature.to_string()))
}
pub fn address_to_string(address: &Address) -> String {
format!("0x{}", address.encode_hex::<String>())
}
#[derive(Debug, Clone, Copy)]
pub enum UploadMode {
Auto,
Azure,
Direct,
}
pub fn upload_mode_from_str(upload_mode: &str) -> Result<UploadMode> {
match upload_mode {
"auto" => Ok(UploadMode::Auto),
"azure" => Ok(UploadMode::Azure),
"direct" => Ok(UploadMode::Direct),
_ => Err(UtilsError::UnknownUploadModeError(upload_mode.to_string()).into()),
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ParticipationMode {
Contribute,
Verify,
}
pub fn participation_mode_from_str(participation_mode: &str) -> Result<ParticipationMode> {
match participation_mode {
"contribute" => Ok(ParticipationMode::Contribute),
"verify" => Ok(ParticipationMode::Verify),
_ => Err(UtilsError::UnknownParticipationModeError(participation_mode.to_string()).into()),
}
}
fn decrypt(passphrase: &SecretString, encrypted: &str) -> Result<Vec<u8>> {
let decoded = SecretVec::new(hex::decode(encrypted)?);
let decryptor = age::Decryptor::new(decoded.expose_secret().as_slice())?;
let mut output = vec![];
if let age::Decryptor::Passphrase(decryptor) = decryptor {
let mut reader = decryptor.decrypt(passphrase, None)?;
reader.read_to_end(&mut output)?;
} else {
return Err(UtilsError::UnsupportedDecryptorError.into());
}
Ok(output)
}
pub fn encrypt(encryptor: Encryptor, secret: &[u8]) -> Result<String> {
let mut encrypted_output = vec![];
let mut writer = encryptor
.wrap_output(ArmoredWriter::wrap_output(
&mut encrypted_output,
Format::Binary,
)?)
.map_err(|e| match e {
EncryptError::Io(e) => e,
})?;
std::io::copy(&mut std::io::Cursor::new(secret), &mut writer)?;
writer.finish()?;
let encrypted_secret = hex::encode(&encrypted_output);
Ok(encrypted_secret.to_string())
}
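// Round-trip sketch (illustrative): `encrypt` hex-encodes an age passphrase
// encryption of `secret`, and `decrypt` above reverses it, so
//   decrypt(&passphrase, &encrypt(age::Encryptor::with_user_passphrase(passphrase.clone()), b"seed")?)?
// yields b"seed" again. Only passphrase-based decryptors are supported.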
pub fn read_keys(
keys_file: &str,
should_use_stdin: bool,
should_collect_extra_entropy: bool,
) -> Result<(SecretVec<u8>, SecretVec<u8>, Attestation)> {
let mut contents = String::new();
{
std::fs::File::open(&keys_file)?.read_to_string(&mut contents)?;
}
let mut keys: PlumoSetupKeys = serde_json::from_str(&contents)?;
let description = "Enter your Plumo setup passphrase";
let passphrase = if should_use_stdin {
println!("{}:", description);
SecretString::new(rpassword::read_password()?)
} else {
age::cli_common::read_secret(description, "Passphrase", None)
.map_err(|_| UtilsError::CouldNotReadPassphraseError)?
};
let plumo_seed_from_file = SecretVec::new(decrypt(&passphrase, &keys.encrypted_seed)?);
let plumo_private_key_from_file =
SecretVec::new(decrypt(&passphrase, &keys.encrypted_private_key)?);
if should_collect_extra_entropy && keys.encrypted_extra_entropy.is_none() && !should_use_stdin {
let description = "Enter some extra entropy (this should only be done at the first time you run the contribute binary!)";
let entered_entropy = age::cli_common::read_secret(description, "Entropy", None)
.map_err(|_| UtilsError::CouldNotReadEntropyError)?;
let encryptor = age::Encryptor::with_user_passphrase(passphrase.clone());
let mut rng = OsRng;
let mut extra_entropy = vec![0u8; 64];
rng.fill_bytes(&mut extra_entropy[..]);
let extra_entropy = SecretVec::new(extra_entropy);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(extra_entropy.expose_secret());
hasher.update(entered_entropy.expose_secret());
let combined_entropy = SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec());
let encrypted_extra_entropy = encrypt(encryptor, combined_entropy.expose_secret())?;
keys.encrypted_extra_entropy = Some(encrypted_extra_entropy);
// Truncate on rewrite so a shorter serialization cannot leave stale bytes behind.
let mut file = OpenOptions::new().write(true).truncate(true).open(&keys_file)?;
file.write_all(&serde_json::to_vec(&keys)?)?;
file.sync_all()?;
}
let plumo_seed = match keys.encrypted_extra_entropy {
None => plumo_seed_from_file,
Some(encrypted_entropy) => {
let entropy = SecretVec::new(decrypt(&passphrase, &encrypted_entropy)?);
let mut hasher = Blake2s::with_params(&[], &[], PLUMO_SETUP_PERSONALIZATION);
hasher.update(plumo_seed_from_file.expose_secret());
hasher.update(entropy.expose_secret());
SecretVec::<u8>::new(hasher.finalize().as_slice().to_vec())
}
};
Ok((plumo_seed, plumo_private_key_from_file, keys.attestation))
}
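// Entropy handling sketch (describes the flow implemented above): on the first
// interactive run, 64 OS-random bytes are hashed together with the user-entered
// entropy using personalized Blake2s, encrypted under the same passphrase and
// written back into the keys file. On every later run the stored extra entropy
// is decrypted and folded into the seed, so the effective seed becomes
// Blake2s(seed || extra_entropy) rather than the raw seed from the file.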
pub fn collect_processor_data() -> Result<Vec<ProcessorData>> {
cfg_if::cfg_if! {
if #[cfg(not(target_arch = "aarch64"))] {
use sysinfo::{ProcessorExt, System, SystemExt};
let s = System::new();
let processors = s
.get_processors()
.iter()
.map(|p| ProcessorData {
name: p.get_name().to_string(),
brand: p.get_brand().to_string(),
frequency: p.get_frequency().to_string(),
})
.collect();
Ok(processors)
} else {
Ok(vec![])
}
}
}
pub struct MaxRetriesHandler {
max_attempts: usize,
}
impl MaxRetriesHandler {
pub fn new(max_attempts: usize) -> Self {
MaxRetriesHandler { max_attempts }
}
}
impl ErrorHandler<anyhow::Error> for MaxRetriesHandler {
type OutError = anyhow::Error;
fn handle(&mut self, attempt: usize, e: anyhow::Error) -> RetryPolicy<Self::OutError> {
warn!(
"Failed: {}, retry {}/{}",
e.to_string(),
attempt,
self.max_attempts,
);
if attempt >= self.max_attempts {
RetryPolicy::ForwardError(e)
} else {
RetryPolicy::WaitRetry(
chrono::Duration::seconds(5)
.to_std()
.expect("Should have converted to standard duration"),
)
}
}
}
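// Illustrative use with futures_retry (mirrors the download helpers above;
// `do_request` is a placeholder):
//
//   let (value, _) = FutureRetry::new(
//       || async { do_request().await },
//       MaxRetriesHandler::new(DEFAULT_MAX_RETRIES),
//   )
//   .await
//   .map_err(|e| UtilsError::RetryFailedError(e.0.to_string()))?;
//
// Each failure is logged and retried after a 5 second wait, up to `max_attempts`.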
pub fn challenge_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.accumulator_size as u64
}
pub fn response_size<E: PairingEngine>(parameters: &Phase1Parameters<E>) -> u64 {
parameters.contribution_size as u64
}
pub fn load_transcript() -> Result<Transcript> {
let filename = "transcript";
if !std::path::Path::new(filename).exists() {
let mut file = File::create(filename)?;
file.write_all(
serde_json::to_string_pretty(&Transcript {
rounds: vec![],
beacon_hash: None,
final_hash: None,
})?
.as_bytes(),
)?;
}
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let transcript: Transcript = serde_json::from_str::<Transcript>(&contents)?;
Ok(transcript)
}
pub fn save_transcript(transcript: &Transcript) -> Result<()> {
let filename = "transcript";
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn backup_transcript(transcript: &Transcript) -> Result<()> {
let filename = format!("transcript_{}", chrono::Utc::now().timestamp_nanos());
let mut file = File::create(filename)?;
file.write_all(serde_json::to_string_pretty(transcript)?.as_bytes())?;
Ok(())
}
pub fn format_attestation(attestation_message: &str, address: &str, signature: &str) -> String {
format!("{} {} {}", attestation_message, address, signature)
}
pub fn extract_signature_from_attestation(attestation: &str) -> Result<(String, String, String)> {
let attestation = attestation.to_string();
let attestation_parts = attestation.split(" ").collect::<Vec<_>>();
if attestation_parts.len() < 3 {
return Err(UtilsError::AttestationTooShort(attestation_parts.len()).into());
}
Ok((
attestation_parts[0..=attestation_parts.len() - 3].join(" "),
attestation_parts[attestation_parts.len() - 2].to_string(),
attestation_parts[attestation_parts.len() - 1].to_string(),
))
}
pub fn write_attestation_to_file(attestation: &Attestation, path: &str) -> Result<()> {
File::create(path)?.write_all(
format_attestation(
&attestation.id,
&attestation.address,
&attestation.signature,
)
.as_bytes(),
)?;
Ok(())
}
pub fn trim_newline(s: &mut String) {
if s.ends_with('\n') {
s.pop();
if s.ends_with('\r') {
s.pop();
}
}
}
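// Illustrative unit tests (a sketch, not part of the original file): they only
// exercise a few of the pure string helpers above, and the input values are
// made up for the example.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn trim_newline_strips_trailing_crlf() {
        let mut s = String::from("hello\r\n");
        trim_newline(&mut s);
        assert_eq!(s, "hello");
    }

    #[test]
    fn extract_signature_splits_attestation_into_message_address_signature() {
        let (message, address, signature) =
            extract_signature_from_attestation("I attest to this 0xabc 0xdef").unwrap();
        assert_eq!(message, "I attest to this");
        assert_eq!(address, "0xabc");
        assert_eq!(signature, "0xdef");
    }

    #[test]
    fn vrs_to_rsv_moves_recovery_byte_to_the_end() {
        let vrs = format!("1b{}{}", "a".repeat(64), "b".repeat(64));
        let rsv = vrs_to_rsv(&vrs);
        assert!(rsv.starts_with(&"a".repeat(64)));
        assert!(rsv.ends_with("1b"));
    }
}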
pub fn compute_hash_from_file(fname: &str) -> Result<String> {
let challenge_contents = std::fs::read(fname)?;
Ok(hex::encode(setup_utils::calculate_hash(
&challenge_contents,
)))
}
# schema.py
import uuid
import django_filters
import graphene
import graphql_geojson
from django.apps import apps
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from graphene import ID, ObjectType, relay, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql import GraphQLError
from graphql_geojson.filters import DistanceFilter
from categories.models import Category
from features import models
from features.enums import HarborMooringType, OverrideFieldType, Visibility, Weekday
from utils.graphene import LanguageEnum, StringListFilter
HarborMooringTypeEnum = graphene.Enum.from_enum(
HarborMooringType, description=lambda e: e.label if e else ""
)
WeekdayEnum = graphene.Enum.from_enum(
Weekday, description=lambda e: e.label if e else ""
)
class Address(ObjectType):
street_address = graphene.String()
postal_code = graphene.String()
municipality = graphene.String()
class ContactInfo(DjangoObjectType):
"""Contact information for the given feature."""
class Meta:
model = models.ContactInfo
fields = ("email", "phone_number")
address = graphene.Field(Address)
def resolve_address(self: models.ContactInfo, info, **kwargs):
return {
"street_address": self.street_address,
"postal_code": self.postal_code,
"municipality": self.municipality,
}
class ExternalLink(DjangoObjectType):
"""Link to an external system.
Link can be e.g. to an online store, a berth rental or to ferry information.
"""
class Meta:
model = models.Link
fields = (
"type",
"url",
)
class FeatureSource(ObjectType):
"""Source system information for a feature."""
system = graphene.String(
required=True,
description=_(
"Name of the source system (e.g. 'myhelsinki', 'ahti', "
"'ulkoliikuntakartta', 'digitransit')"
),
)
type = graphene.String(
required=True,
description=_(
"Type of the feature in the source system, if applicable (e.g. 'place', "
"'activity', 'event', 'route')"
),
)
id = graphene.String(
required=True, description="ID of the current feature in source system"
)
class PriceTag(DjangoObjectType):
"""An item displayed in a price list."""
class Meta:
model = models.PriceTag
fields = ("price",)
item = graphene.String(required=True, description=_("Name of the item"))
price = graphene.Decimal(required=True, description=_("Price of the item in EUR"))
unit = graphene.String(
description=_(
"Unit of the price (e.g. 'hour', 'day', 'piece', 'person', 'child', "
"'one way')"
),
)
class Teaser(DjangoObjectType):
"""Simple content element (e.g. something special about a feature)."""
class Meta:
model = models.FeatureTeaser
fields = () # Don't include any fields from the model automatically
header = graphene.String(
description=_("An opening, e.g. 'Starting' from 'Starting from 7€/day.'")
)
main = graphene.String(description=_("The meat of the deal, '7€/day' part"))
class FeatureTranslations(DjangoObjectType):
"Values in other languages for the feature attributes that can have translations."
language_code = LanguageEnum(required=True)
class Meta:
model = apps.get_model("features", "FeatureTranslation")
exclude = ("id", "master")
class Image(DjangoObjectType):
class Meta:
model = models.Image
fields = (
"url",
"copyright_owner",
"license",
)
class License(DjangoObjectType):
class Meta:
model = models.License
fields = ("id",)
name = graphene.String(required=True, description=_("Display name of the license"))
class Tag(DjangoObjectType):
"""Tags are associated with things (like features)."""
class Meta:
model = models.Tag
fields = ("id", "features")
name = graphene.String(required=True, description=_("Display name of the tag"))
class OpeningHoursPeriod(DjangoObjectType):
"""A period during which certain opening hours are valid."""
class Meta:
model = models.OpeningHoursPeriod
fields = (
"valid_from",
"valid_to",
"opening_hours",
)
comment = graphene.String(
description=_(
"Comment for this opening hour period (e.g. 'Exceptional opening hours "
"during Midsummer')"
),
)
class OpeningHours(DjangoObjectType):
"""The daily opening hours / hours of operation of something."""
class Meta:
model = models.OpeningHours
fields = (
"opens",
"closes",
"all_day",
)
day = WeekdayEnum(required=True, description=_("Day of week"))
class Depth(ObjectType):
"""The depth of something, in meters.
Can be a single value (min and max are equal) or a range.
(Consider: harbor/lake/pool/mineshaft).
"""
min = graphene.Float(
required=True,
description=_(
"An approximation of the minimum depth (or lower end of the range)"
),
)
max = graphene.Float(
required=True,
description=_(
"An approximation of the maximum depth (or deeper end of the range)"
),
)
class HarborDetails(ObjectType):
"""Information specific to harbors (and piers)."""
moorings = graphene.List(
graphene.NonNull(HarborMooringTypeEnum),
description=_("Mooring types available in the harbor"),
)
depth = graphene.Field(
Depth, description=_("Approximate depth of the harbor, in meters")
)
def resolve_moorings(self: models.FeatureDetails, info, **kwargs):
return self.data["berth_moorings"]
def resolve_depth(self: models.FeatureDetails, info, **kwargs):
"""Minimum depth is mandatory, maximum is included for a range."""
min = self.data.get("berth_min_depth")
max = self.data.get("berth_max_depth")
if min is None:
return None
return {
"min": min,
"max": max,
}
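# For example (illustrative data), a harbor details payload of
# {"berth_moorings": [...], "berth_min_depth": 2.5, "berth_max_depth": 4.0}
# resolves to those mooring types plus a depth of {"min": 2.5, "max": 4.0}.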
class FeatureDetails(ObjectType):
"""Detailed information a feature might have."""
harbor = graphene.Field(HarborDetails, description=_("Details of a harbor"))
price_list = graphene.List(
"features.schema.PriceTag",
required=True,
description=_("Price list related to a feature"),
)
class FeatureFilter(django_filters.FilterSet):
"""Contains the filters to use when retrieving features."""
class Meta:
model = models.Feature
fields = [
"distance_lte",
"updated_since",
"tagged_with_any",
"tagged_with_all",
"category",
]
distance_lte = DistanceFilter(
field_name="geometry",
lookup_expr="distance_lte",
label=_("Fetch features within a given distance from the given geometry"),
)
updated_since = django_filters.IsoDateTimeFilter(
method="filter_updated_since",
label=_("Fetch features that have changed since specified timestamp"),
)
tagged_with_any = StringListFilter(
method="filter_tagged_with_any",
label=_("Fetch features tagged with any of the specified tags (ids)"),
)
tagged_with_all = StringListFilter(
method="filter_tagged_with_all",
label=_("Fetch features tagged with all of the specified tags (ids)"),
)
category = StringListFilter(
method="filter_category", label=_("Fetch features from included categories")
)
def filter_updated_since(self, queryset, name, value):
return queryset.filter(
Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value)
).distinct() # Distinct because filtering on ForeignKey relation.
def filter_tagged_with_any(self, queryset, name, value):
return queryset.filter(
tags__in=value
).distinct() # Distinct because filtering on ForeignKey relation.
def filter_tagged_with_all(self, queryset, name, value):
for v in value:
queryset = queryset.filter(tags=v)
return queryset
def filter_category(self, queryset, name, value):
return queryset.filter(category__in=value)
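# Illustrative query exercising the filters above (argument names follow
# graphene's default camelCase conversion; the values are placeholders):
#
#   query {
#     features(taggedWithAny: ["sauna"], updatedSince: "2020-01-01T00:00:00+00:00") {
#       edges { node { properties { name oneLiner } } }
#     }
#   }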
class Feature(graphql_geojson.GeoJSONType):
"""Features in Ahti are structured according to GeoJSON specification.
All Ahti specific attributes are contained within attribute `properties`.
**Note!** `Feature.type` always has the value `Feature`.
"""
class Meta:
fields = (
"id",
"category",
"created_at",
"contact_info",
"teaser",
"details",
"geometry",
"images",
"links",
"opening_hours_periods",
"tags",
"translations",
)
filterset_class = FeatureFilter
model = models.Feature
geojson_field = "geometry"
interfaces = (relay.Node,)
ahti_id = graphene.String(
required=True,
description=_(
"Human readable ID. Format examples: "
"'ahti:feature:12C4' or 'myhelsinki:place:5678'"
),
)
source = graphene.Field(
FeatureSource, required=True, description=_("Source of the feature")
)
name = graphene.String(required=True, description=_("Name of the feature"))
one_liner = graphene.String(
required=True, description=_("Short introductory text or a tagline")
)
description = graphene.String(description=_("Description of the feature"))
details = graphene.Field(
FeatureDetails, description=_("Detailed information a feature might have")
)
url = graphene.String(description=_("URL for more information about this feature"))
modified_at = graphene.DateTime(required=True)
parents = graphene.List(
"features.schema.Feature",
required=True,
description=_("Parents of this feature"),
)
children = graphene.List(
"features.schema.Feature",
required=True,
description=_(
"Children of this feature (ex. stops along a route, piers of a harbor etc.)"
),
)
def resolve_source(self: models.Feature, info, **kwargs):
return {
"system": self.source_type.system,
"type": self.source_type.type,
"id": self.source_id,
}
def resolve_name(self: models.Feature, info, **kwargs):
name_override = self.overrides.filter(field=OverrideFieldType.NAME).first()
if name_override:
return name_override.value
return self.name
def resolve_modified_at(self: models.Feature, info, **kwargs):
latest_override = self.overrides.order_by("-modified_at").first()
return (
max(self.source_modified_at, latest_override.modified_at)
if latest_override
else self.source_modified_at
)
def resolve_details(self: models.Feature, info, **kwargs):
details = {}
for detail in self.details.all():
# Default dict resolver will resolve this for FeatureDetails
details[detail.type.lower()] = detail
# PriceTags have a relation to Feature model, so we resolve it separately
details["price_list"] = self.price_tags.all()
return details if details else None
def resolve_parents(self: models.Feature, info, **kwargs):
return self.parents.all()
def resolve_children(self: models.Feature, info, **kwargs):
return self.children.all()
@classmethod
def get_queryset(cls, queryset, info):
return (
queryset.filter(visibility=Visibility.VISIBLE)
.select_related("source_type", "category", "teaser")
.prefetch_related(
"category__translations",
"contact_info",
"children",
"details",
"price_tags",
"price_tags__translations",
"images",
"images__license",
"images__license__translations",
"links",
"opening_hours_periods",
"opening_hours_periods__opening_hours",
"opening_hours_periods__translations",
"parents",
"tags",
"tags__translations",
"teaser__translations",
"translations",
)
)
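# A feature node serializes roughly like GeoJSON (sketch; coordinates and names
# below are placeholders):
#
#   {
#     "type": "Feature",
#     "geometry": {"type": "Point", "coordinates": [24.95, 60.17]},
#     "properties": {"ahtiId": "ahti:feature:12C4", "name": "...", ...}
#   }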
class FeatureTranslationsInput(graphene.InputObjectType):
language_code = LanguageEnum(required=True)
name = graphene.String(required=True, description=_("Name of the feature"))
description = graphene.String(description=_("Description of the feature"))
url = graphene.String(description=_("URL for more information about this feature"))
one_liner = graphene.String(description=_("Short introductory text or a tagline"))
class ContactInfoInput(graphene.InputObjectType):
street_address = graphene.String()
postal_code = graphene.String()
municipality = graphene.String()
phone_number = graphene.String()
email = graphene.String()
class CreateFeatureMutation(relay.ClientIDMutation):
class Input:
translations = graphene.List(
graphene.NonNull(FeatureTranslationsInput), required=True
)
geometry = graphql_geojson.Geometry(required=True)
contact_info = ContactInfoInput()
category_id = graphene.String()
tag_ids = graphene.List(graphene.String)
feature = graphene.Field(Feature)
@classmethod
def get_source_type(cls):
st, created = models.SourceType.objects.get_or_create(system="ahti", type="api")
return st
@classmethod
@transaction.atomic
def mutate_and_get_payload(cls, root, info, **kwargs):
contact_info_values = kwargs.pop("contact_info", None)
tag_ids = kwargs.pop("tag_ids", None)
category_id = kwargs.pop("category_id", None)
now = timezone.now()
values = {
"source_type": cls.get_source_type(),
"source_id": uuid.uuid4(),
"source_modified_at": now,
"mapped_at": now,
"visibility": Visibility.DRAFT,
}
values.update(kwargs)
if category_id:
values["category"] = Category.objects.get(id=category_id)
if tag_ids:
tags = [models.Tag.objects.get(id=tag_id) for tag_id in tag_ids]
else:
tags = []
feature = models.Feature.objects.create_translatable_object(**values)
if contact_info_values:
ci = models.ContactInfo.objects.create(
feature=feature, **contact_info_values
)
ci.full_clean()
ci.save()
if tags:
feature.tags.set(tags)
return CreateFeatureMutation(feature=feature)
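# Illustrative mutation against CreateFeatureMutation above (all values are
# placeholders; the exact geometry input format depends on graphql_geojson):
#
#   mutation {
#     createFeature(input: {
#       translations: [{languageCode: FI, name: "Testikohde"}],
#       geometry: "{\"type\": \"Point\", \"coordinates\": [24.95, 60.17]}"
#     }) {
#       feature { properties { name } }
#     }
#   }
#
# Note that new features are created with DRAFT visibility, so they stay out of
# the public listing until reviewed.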
class Query(graphene.ObjectType):
features = DjangoFilterConnectionField(
Feature, description=_("Retrieve all features matching the given filters")
)
feature = graphene.Field(
Feature,
id=ID(description=_("The ID of the object")),
ahti_id=String(description=_("Ahti ID of the object")),
description=_("Retrieve a single feature"),
)
tags = graphene.List(Tag, description=_("Retrieve all tags"))
def resolve_feature(self, info, id=None, ahti_id=None, **kwargs):
if id:
return relay.Node.get_node_from_global_id(info, id, only_type=Feature)
if ahti_id:
try:
return Feature.get_queryset(models.Feature.objects, info).ahti_id(
ahti_id=ahti_id
)
except models.Feature.DoesNotExist:
return None
raise GraphQLError("You must provide either `id` or `ahtiId`.")
def resolve_tags(self, info, **kwargs):
return models.Tag.objects.all()
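# Example lookup by Ahti ID (illustrative):
#
#   query { feature(ahtiId: "myhelsinki:place:5678") { properties { name url } } }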
class Mutation(graphene.ObjectType):
create_feature = CreateFeatureMutation.Field(
description=_(
"Create a new feature into the system which will go through a"
"review before it is published into the API."
)
)
# schema.py
import uuid
import django_filters
import graphene
import graphql_geojson
from django.apps import apps
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from graphene import ID, ObjectType, relay, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql import GraphQLError
from graphql_geojson.filters import DistanceFilter
from categories.models import Category
from features import models
from features.enums import HarborMooringType, OverrideFieldType, Visibility, Weekday
from utils.graphene import LanguageEnum, StringListFilter
HarborMooringTypeEnum = graphene.Enum.from_enum(
HarborMooringType, description=lambda e: e.label if e else ""
)
WeekdayEnum = graphene.Enum.from_enum(
Weekday, description=lambda e: e.label if e else ""
)
class Address(ObjectType):
street_address = graphene.String()
postal_code = graphene.String()
municipality = graphene.String()
class ContactInfo(DjangoObjectType):
"""Contact information for the given feature."""
class Meta:
model = models.ContactInfo
fields = ("email", "phone_number")
address = graphene.Field(Address)
def resolve_address(self: models.ContactInfo, info, **kwargs):
return {
"street_address": self.street_address,
"postal_code": self.postal_code,
"municipality": self.municipality,
}
class ExternalLink(DjangoObjectType):
"""Link to an external system.
Link can be e.g. to an online store, a berth rental or to ferry information.
"""
class Meta:
model = models.Link
fields = (
"type",
"url",
)
class FeatureSource(ObjectType):
"""Source system information for a feature."""
system = graphene.String(
required=True,
description=_(
"Name of the source system (e.g. 'myhelsinki', 'ahti', "
"'ulkoliikuntakartta', 'digitransit')"
),
)
type = graphene.String(
required=True,
description=_(
"Type of the feature in the source system, if applicable (e.g. 'place', "
"'activity', 'event', 'route')"
),
)
id = graphene.String(
required=True, description="ID of the current feature in source system"
)
class PriceTag(DjangoObjectType):
"""An item displayed in a price list."""
class Meta:
model = models.PriceTag
fields = ("price",)
item = graphene.String(required=True, description=_("Name of the item"))
price = graphene.Decimal(required=True, description=_("Price of the item in EUR"))
unit = graphene.String(
description=_(
"Unit of the price (e.g. 'hour', 'day', 'piece', 'person', 'child', "
"'one way')"
),
)
class Teaser(DjangoObjectType):
"""Simple content element (e.g. something special about a feature)."""
class Meta:
model = models.FeatureTeaser
fields = () # Don't include any fields from the model automatically
header = graphene.String(
description=_("An opening, e.g. 'Starting' from 'Starting from 7€/day.'")
)
main = graphene.String(description=_("The meat of the deal, '7€/day' part"))
class FeatureTranslations(DjangoObjectType):
"Values in other languages for the feature attributes that can have translations."
language_code = LanguageEnum(required=True)
class Meta:
model = apps.get_model("features", "FeatureTranslation")
exclude = ("id", "master")
class Image(DjangoObjectType):
class Meta:
model = models.Image
fields = (
"url",
"copyright_owner",
"license",
)
class License(DjangoObjectType):
class Meta:
model = models.License
fields = ("id",)
name = graphene.String(required=True, description=_("Display name of the license"))
class Tag(DjangoObjectType):
"""Tags are associated with things (like features)."""
class Meta:
model = models.Tag
fields = ("id", "features")
name = graphene.String(required=True, description=_("Display name of the tag"))
class OpeningHoursPeriod(DjangoObjectType):
"""A period during which certain opening hours are valid."""
class Meta:
model = models.OpeningHoursPeriod
fields = (
"valid_from",
"valid_to",
"opening_hours",
)
comment = graphene.String(
description=_(
"Comment for this opening hour period (e.g. 'Exceptional opening hours "
"during Midsummer')"
),
)
class OpeningHours(DjangoObjectType):
"""The daily opening hours / hours of operation of something."""
class Meta:
model = models.OpeningHours
fields = (
"opens",
"closes",
"all_day",
)
day = WeekdayEnum(required=True, description=_("Day of week"))
class Depth(ObjectType):
"""The depth of something, in meters.
Can be a single value (min and max are equal) or a range.
(Consider: harbor/lake/pool/mineshaft).
"""
min = graphene.Float(
required=True,
description=_(
"An approximation of the minimum depth (or lower end of the range)"
),
)
max = graphene.Float(
required=True,
description=_(
"An approximation of the maximum depth (or deeper end of the range)"
),
)
class HarborDetails(ObjectType):
"""Information specific to harbors (and piers)."""
moorings = graphene.List(
graphene.NonNull(HarborMooringTypeEnum),
description=_("Mooring types available in the harbor"),
)
depth = graphene.Field(
Depth, description=_("Approximate depth of the harbor, in meters")
)
def resolve_moorings(self: models.FeatureDetails, info, **kwargs):
return self.data["berth_moorings"]
def resolve_depth(self: models.FeatureDetails, info, **kwargs):
"""Minimum depth is mandatory, maximum is included for a range."""
min = self.data.get("berth_min_depth")
max = self.data.get("berth_max_depth")
if min is None:
return None
return {
"min": min,
"max": max,
}
class FeatureDetails(ObjectType):
"""Detailed information a feature might have."""
harbor = graphene.Field(HarborDetails, description=_("Details of a harbor"))
price_list = graphene.List(
"features.schema.PriceTag",
required=True,
description=_("Price list related to a feature"),
)
class FeatureFilter(django_filters.FilterSet):
"""Contains the filters to use when retrieving features."""
class Meta:
model = models.Feature
fields = [
"distance_lte",
"updated_since",
"tagged_with_any",
"tagged_with_all",
"category",
]
distance_lte = DistanceFilter(
field_name="geometry",
lookup_expr="distance_lte",
label=_("Fetch features within a given distance from the given geometry"),
)
updated_since = django_filters.IsoDateTimeFilter(
method="filter_updated_since",
label=_("Fetch features that have changed since specified timestamp"),
)
tagged_with_any = StringListFilter(
method="filter_tagged_with_any",
label=_("Fetch features tagged with any of the specified tags (ids)"),
)
tagged_with_all = StringListFilter(
method="filter_tagged_with_all",
label=_("Fetch features tagged with all of the specified tags (ids)"),
)
category = StringListFilter(
method="filter_category", label=_("Fetch features from included categories")
)
def filter_updated_since(self, queryset, name, value):
return queryset.filter(
Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value)
).distinct() # Distinct because filtering on ForeignKey relation.
def filter_tagged_with_any(self, queryset, name, value):
return queryset.filter(
tags__in=value
).distinct() # Distinct because filtering on ForeignKey relation.
def filter_tagged_with_all(self, queryset, name, value):
for v in value:
queryset = queryset.filter(tags=v)
return queryset
def filter_category(self, queryset, name, value):
return queryset.filter(category__in=value)
class Feature(graphql_geojson.GeoJSONType):
"""Features in Ahti are structured according to GeoJSON specification.
All Ahti specific attributes are contained within attribute `properties`.
**Note!** `Feature.type` always has the value `Feature`.
"""
class Meta:
fields = (
"id",
"category",
"created_at",
"contact_info",
"teaser",
"details",
"geometry",
"images",
"links",
"opening_hours_periods",
"tags",
"translations",
)
filterset_class = FeatureFilter
model = models.Feature
geojson_field = "geometry"
interfaces = (relay.Node,)
ahti_id = graphene.String(
required=True,
description=_(
"Human readable ID. Format examples: "
"'ahti:feature:12C4' or 'myhelsinki:place:5678'"
),
)
source = graphene.Field(
FeatureSource, required=True, description=_("Source of the feature")
)
name = graphene.String(required=True, description=_("Name of the feature"))
one_liner = graphene.String(
required=True, description=_("Short introductory text or a tagline")
)
description = graphene.String(description=_("Description of the feature"))
details = graphene.Field(
FeatureDetails, description=_("Detailed information a feature might have")
)
url = graphene.String(description=_("URL for more information about this feature"))
modified_at = graphene.DateTime(required=True)
parents = graphene.List(
"features.schema.Feature",
required=True,
description=_("Parents of this feature"),
)
children = graphene.List(
"features.schema.Feature",
required=True,
description=_(
"Children of this feature (ex. stops along a route, piers of a harbor etc.)"
),
)
def resolve_source(self: models.Feature, info, **kwargs):
return {
"system": self.source_type.system,
"type": self.source_type.type,
"id": self.source_id,
}
def resolve_name(self: models.Feature, info, **kwargs):
name_override = self.overrides.filter(field=OverrideFieldType.NAME).first()
if name_override:
return name_override.value
return self.name
def resolve_modified_at(self: models.Feature, info, **kwargs):
latest_override = self.overrides.order_by("-modified_at").first()
return (
max(self.source_modified_at, latest_override.modified_at)
if latest_override
else self.source_modified_at
)
def resolve_details(self: models.Feature, info, **kwargs):
details = {}
for detail in self.details.all():
# Default dict resolver will resolve this for FeatureDetails
details[detail.type.lower()] = detail
# PriceTags have a relation to Feature model, so we resolve it separately
details["price_list"] = self.price_tags.all()
return details if details else None
def resolve_parents(self: models.Feature, info, **kwargs):
return self.parents.all()
def resolve_children(self: models.Feature, info, **kwargs):
return self.children.all()
@classmethod
def get_queryset(cls, queryset, info):
return (
queryset.filter(visibility=Visibility.VISIBLE)
.select_related("source_type", "category", "teaser")
.prefetch_related(
"category__translations",
"contact_info",
"children",
"details",
"price_tags",
"price_tags__translations",
"images",
"images__license",
"images__license__translations",
"links",
"opening_hours_periods",
"opening_hours_periods__opening_hours",
"opening_hours_periods__translations",
"parents",
"tags",
"tags__translations",
"teaser__translations",
"translations",
)
)
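# Illustrative only (not part of the original module): a features query of the
# kind this connection serves once the schema is mounted. Field casing follows
# graphene's default camelCase conversion and the exact GeoJSON node layout
# comes from graphql_geojson, so treat the shape and the category id as
# assumptions rather than a verified API contract.
#
# query {
#   features(updatedSince: "2020-01-01T00:00:00+00:00", category: ["ahti:category:island"]) {
#     edges {
#       node {
#         id
#         geometry
#         properties { ahtiId name oneLiner modifiedAt }
#       }
#     }
#   }
# }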
class FeatureTranslationsInput(graphene.InputObjectType):
language_code = LanguageEnum(required=True)
name = graphene.String(required=True, description=_("Name of the feature"))
description = graphene.String(description=_("Description of the feature"))
url = graphene.String(description=_("URL for more information about this feature"))
one_liner = graphene.String(description=_("Short introductory text or a tagline"))
class ContactInfoInput(graphene.InputObjectType):
stre | lass CreateFeatureMutation(relay.ClientIDMutation):
class Input:
translations = graphene.List(
graphene.NonNull(FeatureTranslationsInput), required=True
)
geometry = graphql_geojson.Geometry(required=True)
contact_info = ContactInfoInput()
category_id = graphene.String()
tag_ids = graphene.List(graphene.String)
feature = graphene.Field(Feature)
@classmethod
def get_source_type(cls):
st, created = models.SourceType.objects.get_or_create(system="ahti", type="api")
return st
@classmethod
@transaction.atomic
def mutate_and_get_payload(cls, root, info, **kwargs):
contact_info_values = kwargs.pop("contact_info", None)
tag_ids = kwargs.pop("tag_ids", None)
category_id = kwargs.pop("category_id", None)
now = timezone.now()
values = {
"source_type": cls.get_source_type(),
"source_id": uuid.uuid4(),
"source_modified_at": now,
"mapped_at": now,
"visibility": Visibility.DRAFT,
}
values.update(kwargs)
if category_id:
values["category"] = Category.objects.get(id=category_id)
if tag_ids:
tags = [models.Tag.objects.get(id=tag_id) for tag_id in tag_ids]
else:
tags = []
feature = models.Feature.objects.create_translatable_object(**values)
if contact_info_values:
ci = models.ContactInfo.objects.create(
feature=feature, **contact_info_values
)
ci.full_clean()
ci.save()
if tags:
feature.tags.set(tags)
return CreateFeatureMutation(feature=feature)
class Query(graphene.ObjectType):
features = DjangoFilterConnectionField(
Feature, description=_("Retrieve all features matching the given filters")
)
feature = graphene.Field(
Feature,
id=ID(description=_("The ID of the object")),
ahti_id=String(description=_("Ahti ID of the object")),
description=_("Retrieve a single feature"),
)
tags = graphene.List(Tag, description=_("Retrieve all tags"))
def resolve_feature(self, info, id=None, ahti_id=None, **kwargs):
if id:
return relay.Node.get_node_from_global_id(info, id, only_type=Feature)
if ahti_id:
try:
return Feature.get_queryset(models.Feature.objects, info).ahti_id(
ahti_id=ahti_id
)
except models.Feature.DoesNotExist:
return None
raise GraphQLError("You must provide either `id` or `ahtiId`.")
def resolve_tags(self, info, **kwargs):
return models.Tag.objects.all()
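# Sketch of a single-feature lookup against resolve_feature above. The ahtiId
# value reuses the format example from the Feature.ahti_id description; either
# `id` (a relay global ID) or `ahtiId` must be supplied, otherwise the resolver
# raises a GraphQLError.
#
# query {
#   feature(ahtiId: "myhelsinki:place:5678") {
#     id
#     properties { name description url }
#   }
# }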
class Mutation(graphene.ObjectType):
create_feature = CreateFeatureMutation.Field(
description=_(
"Create a new feature into the system which will go through a"
"review before it is published into the API."
)
)
| et_address = graphene.String()
postal_code = graphene.String()
municipality = graphene.String()
phone_number = graphene.String()
email = graphene.String()
c | identifier_body |
schema.py | import uuid
import django_filters
import graphene
import graphql_geojson
from django.apps import apps
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from graphene import ID, ObjectType, relay, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql import GraphQLError
from graphql_geojson.filters import DistanceFilter
from categories.models import Category
from features import models
from features.enums import HarborMooringType, OverrideFieldType, Visibility, Weekday
from utils.graphene import LanguageEnum, StringListFilter
HarborMooringTypeEnum = graphene.Enum.from_enum(
HarborMooringType, description=lambda e: e.label if e else ""
)
WeekdayEnum = graphene.Enum.from_enum(
Weekday, description=lambda e: e.label if e else ""
)
class Address(ObjectType):
street_address = graphene.String()
postal_code = graphene.String()
municipality = graphene.String()
class ContactInfo(DjangoObjectType):
"""Contact information for the given feature."""
class Meta:
model = models.ContactInfo
fields = ("email", "phone_number")
address = graphene.Field(Address)
def resolve_address(self: models.ContactInfo, info, **kwargs):
return {
"street_address": self.street_address,
"postal_code": self.postal_code,
"municipality": self.municipality,
}
class ExternalLink(DjangoObjectType):
"""Link to an external system.
Link can be e.g. to an online store, a berth rental or to ferry information.
"""
class Meta:
model = models.Link
fields = (
"type",
"url",
)
class FeatureSource(ObjectType):
"""Source system information for a feature."""
system = graphene.String(
required=True,
description=_(
"Name of the source system (e.g. 'myhelsinki', 'ahti', "
"'ulkoliikuntakartta', 'digitransit')"
),
)
type = graphene.String(
required=True,
description=_(
"Type of the feature in the source system, if applicable (e.g. 'place', "
"'activity', 'event', 'route')"
),
)
id = graphene.String(
required=True, description="ID of the current feature in source system"
)
class PriceTag(DjangoObjectType):
"""An item displayed in a price list."""
class Meta:
model = models.PriceTag
fields = ("price",)
item = graphene.String(required=True, description=_("Name of the item"))
price = graphene.Decimal(required=True, description=_("Price of the item in EUR"))
unit = graphene.String(
description=_(
"Unit of the price (e.g. 'hour', 'day', 'piece', 'person', 'child', "
"'one way')"
),
)
class Teaser(DjangoObjectType):
"""Simple content element (e.g. something special about a feature)."""
class Meta:
model = models.FeatureTeaser
fields = () # Don't include any fields from the model automatically
header = graphene.String(
description=_("An opening, e.g. 'Starting' from 'Starting from 7€/day.'")
)
main = graphene.String(description=_("The meat of the deal, '7€/day' part"))
class FeatureTranslations(DjangoObjectType):
"Values in other languages for the feature attributes that can have translations."
language_code = LanguageEnum(required=True)
class Meta:
model = apps.get_model("features", "FeatureTranslation")
exclude = ("id", "master")
class Image(DjangoObjectType):
class Meta:
model = models.Image
fields = (
"url",
"copyright_owner",
"license",
)
class License(DjangoObjectType):
class Meta:
model = models.License
fields = ("id",)
name = graphene.String(required=True, description=_("Display name of the license"))
class Tag(DjangoObjectType):
"""Tags are associated with things (like features)."""
class Meta:
model = models.Tag
fields = ("id", "features")
name = graphene.String(required=True, description=_("Display name of the tag"))
class OpeningHoursPeriod(DjangoObjectType):
"""A period during which certain opening hours are valid."""
class Meta:
model = models.OpeningHoursPeriod
fields = (
"valid_from",
"valid_to",
"opening_hours",
)
comment = graphene.String(
description=_(
"Comment for this opening hour period (e.g. 'Exceptional opening hours "
"during Midsummer')"
),
)
class OpeningHours(DjangoObjectType):
"""The daily opening hours / hours of operation of something."""
class Meta:
model = models.OpeningHours
fields = (
"opens",
"closes",
"all_day",
)
day = WeekdayEnum(required=True, description=_("Day of week"))
class Depth(ObjectType):
"""The depth of something, in meters.
Can be a single value (min and max are equal) or a range.
(Consider: harbor/lake/pool/mineshaft).
"""
min = graphene.Float(
required=True,
description=_(
"An approximation of the minimum depth (or lower end of the range)"
),
)
max = graphene.Float(
required=True,
description=_(
"An approximation of the maximum depth (or deeper end of the range)"
),
)
class HarborDetails(ObjectType):
"""Information specific to harbors (and piers)."""
moorings = graphene.List(
graphene.NonNull(HarborMooringTypeEnum),
description=_("Mooring types available in the harbor"),
)
depth = graphene.Field(
Depth, description=_("Approximate depth of the harbor, in meters")
)
def resolve_moorings(self: models.FeatureDetails, info, **kwargs):
return self.data["berth_moorings"]
def resolve_depth(self: models.FeatureDetails, info, **kwargs):
"""Minimum depth is mandatory, maximum is included for a range."""
min = self.data.get("berth_min_depth")
max = self.data.get("berth_max_depth")
if min is None:
return None
return {
"min": min,
"max": max,
}
class FeatureDetails(ObjectType):
"""Detailed information a feature might have."""
harbor = graphene.Field(HarborDetails, description=_("Details of a harbor"))
price_list = graphene.List(
"features.schema.PriceTag",
required=True,
description=_("Price list related to a feature"),
)
class FeatureFilter(django_filters.FilterSet):
"""Contains the filters to use when retrieving features."""
class Meta:
model = models.Feature
fields = [
"distance_lte",
"updated_since",
"tagged_with_any",
"tagged_with_all",
"category",
]
distance_lte = DistanceFilter(
field_name="geometry",
lookup_expr="distance_lte",
label=_("Fetch features within a given distance from the given geometry"),
)
updated_since = django_filters.IsoDateTimeFilter(
method="filter_updated_since",
label=_("Fetch features that have changed since specified timestamp"),
)
tagged_with_any = StringListFilter(
method="filter_tagged_with_any",
label=_("Fetch features tagged with any of the specified tags (ids)"),
)
tagged_with_all = StringListFilter(
method="filter_tagged_with_all",
label=_("Fetch features tagged with all of the specified tags (ids)"),
)
category = StringListFilter(
method="filter_category", label=_("Fetch features from included categories")
)
def filter_updated_since(self, queryset, name, value):
return queryset.filter(
Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value)
).distinct() # Distinct because filtering on ForeignKey relation.
def filter_tagged_with_any(self, queryset, name, value):
return queryset.filter(
tags__in=value
).distinct() # Distinct because filtering on ForeignKey relation.
def filter_tagged_with_all(self, queryset, name, value):
for v in value:
queryset = queryset.filter(tags=v)
return queryset
def filter_category(self, queryset, name, value):
return queryset.filter(category__in=value)
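# Hand-written illustration (not from the original file) of exercising this
# FilterSet outside GraphQL, e.g. in a test. The tag ids are invented, and the
# exact input format StringListFilter accepts depends on its widget, so this is
# a sketch rather than a verified call.
#
# fs = FeatureFilter(
#     data={"tagged_with_all": ["ahti:tag:sauna", "ahti:tag:island"]},
#     queryset=models.Feature.objects.all(),
# )
# matching = fs.qs  # chains .filter(tags=v) per tag, i.e. all tags must match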
class Feature(graphql_geojson.GeoJSONType):
"""Features in Ahti are structured according to GeoJSON specification.
All Ahti specific attributes are contained within attribute `properties`.
**Note!** `Feature.type` always has the value `Feature`.
"""
class Meta:
fields = (
"id",
"category",
"created_at",
"contact_info",
"teaser",
"details",
"geometry",
"images",
"links",
"opening_hours_periods",
"tags",
"translations",
)
filterset_class = FeatureFilter
model = models.Feature
geojson_field = "geometry"
interfaces = (relay.Node,)
ahti_id = graphene.String(
required=True,
description=_(
"Human readable ID. Format examples: "
"'ahti:feature:12C4' or 'myhelsinki:place:5678'"
),
)
source = graphene.Field(
FeatureSource, required=True, description=_("Source of the feature")
)
name = graphene.String(required=True, description=_("Name of the feature"))
one_liner = graphene.String(
required=True, description=_("Short introductory text or a tagline")
)
description = graphene.String(description=_("Description of the feature"))
details = graphene.Field(
FeatureDetails, description=_("Detailed information a feature might have")
)
url = graphene.String(description=_("URL for more information about this feature"))
modified_at = graphene.DateTime(required=True)
parents = graphene.List(
"features.schema.Feature",
required=True,
description=_("Parents of this feature"),
)
children = graphene.List(
"features.schema.Feature",
required=True,
description=_(
"Children of this feature (ex. stops along a route, piers of a harbor etc.)"
),
)
def resolve_source(self: models.Feature, info, **kwargs):
return {
"system": self.source_type.system,
"type": self.source_type.type,
"id": self.source_id,
}
def resolve_name(self: models.Feature, info, **kwargs):
name_override = self.overrides.filter(field=OverrideFieldType.NAME).first()
if name_override:
return name_override.value
return self.name
def resolve_modified_at(self: models.Feature, info, **kwargs):
latest_override = self.overrides.order_by("-modified_at").first()
return (
max(self.source_modified_at, latest_override.modified_at)
if latest_override
else self.source_modified_at
)
def resolve_details(self: models.Feature, info, **kwargs):
details = {}
for detail in self.details.all():
# Default dict resolver will resolve this for FeatureDetails
details[detail.type.lower()] = detail
# PriceTags have a relation to Feature model, so we resolve it separately
details["price_list"] = self.price_tags.all()
return details if details else None
def resolve_parents(self: models.Feature, info, **kwargs):
return self.parents.all()
def resolve_children(self: models.Feature, info, **kwargs):
return self.children.all()
@classmethod
def get_queryset(cls, queryset, info):
return (
queryset.filter(visibility=Visibility.VISIBLE)
.select_related("source_type", "category", "teaser")
.prefetch_related(
"category__translations",
"contact_info",
"children",
"details",
"price_tags",
"price_tags__translations",
"images",
"images__license",
"images__license__translations",
"links",
"opening_hours_periods",
"opening_hours_periods__opening_hours",
"opening_hours_periods__translations",
"parents",
"tags",
"tags__translations",
"teaser__translations",
"translations",
)
)
class FeatureTranslationsInput(graphene.InputObjectType):
language_code = LanguageEnum(required=True)
name = graphene.String(required=True, description=_("Name of the feature"))
description = graphene.String(description=_("Description of the feature"))
url = graphene.String(description=_("URL for more information about this feature"))
one_liner = graphene.String(description=_("Short introductory text or a tagline"))
class ContactInfoInput(graphene.InputObjectType):
street_address = graphene.String()
postal_code = graphene.String()
municipality = graphene.String()
phone_number = graphene.String()
email = graphene.String()
class CreateFeatureMutation(relay.ClientIDMutation):
class Input:
translations = graphene.List(
graphene.NonNull(FeatureTranslationsInput), required=True
)
geometry = graphql_geojson.Geometry(required=True)
contact_info = ContactInfoInput()
category_id = graphene.String()
tag_ids = graphene.List(graphene.String)
feature = graphene.Field(Feature)
@classmethod
def get_source_type(cls):
st, created = models.SourceType.objects.get_or_create(system="ahti", type="api")
return st
@classmethod
@transaction.atomic
def mutate_and_get_payload(cls, root, info, **kwargs):
contact_info_values = kwargs.pop("contact_info", None)
tag_ids = kwargs.pop("tag_ids", None)
category_id = kwargs.pop("category_id", None)
now = timezone.now()
values = {
"source_type": cls.get_source_type(),
"source_id": uuid.uuid4(),
"source_modified_at": now,
"mapped_at": now,
"visibility": Visibility.DRAFT,
}
values.update(kwargs)
if category_id:
values["category"] = Category.objects.get(id=category_id)
if tag_ids:
tags = [models.Tag.objects.get(id=tag_id) for tag_id in tag_ids]
else:
tags = []
feature = models.Feature.objects.create_translatable_object(**values)
if contact_info_values:
ci = models.ContactInfo.objects.create(
feature=feature, **contact_info_values
)
ci.full_clean()
ci.save()
if tags:
feature.tags.set(tags)
return CreateFeatureMutation(feature=feature)
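# Rough sketch of the corresponding GraphQL call; all values are invented, and
# the exact scalar formats (geometry input, LanguageEnum values) depend on
# graphql_geojson and utils.graphene, so this is not a verbatim API example.
#
# mutation {
#   createFeature(input: {
#     translations: [{languageCode: FI, name: "Esimerkkisaari", oneLiner: "Saari meressä"}]
#     geometry: "{\"type\": \"Point\", \"coordinates\": [24.95, 60.17]}"
#     contactInfo: {municipality: "Helsinki"}
#   }) {
#     feature { properties { name } }
#   }
# }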
class Query(graphene.ObjectType):
features = DjangoFilterConnectionField(
Feature, description=_("Retrieve all features matching the given filters")
)
feature = graphene.Field(
Feature,
id=ID(description=_("The ID of the object")),
ahti_id=String(description=_("Ahti ID of the object")),
description=_("Retrieve a single feature"),
)
tags = graphene.List(Tag, description=_("Retrieve all tags"))
def resolve_feature(self, info, id=None, ahti_id=None, **kwargs):
if id:
retu | if ahti_id:
try:
return Feature.get_queryset(models.Feature.objects, info).ahti_id(
ahti_id=ahti_id
)
except models.Feature.DoesNotExist:
return None
raise GraphQLError("You must provide either `id` or `ahtiId`.")
def resolve_tags(self, info, **kwargs):
return models.Tag.objects.all()
class Mutation(graphene.ObjectType):
create_feature = CreateFeatureMutation.Field(
description=_(
"Create a new feature into the system which will go through a"
"review before it is published into the API."
)
)
| rn relay.Node.get_node_from_global_id(info, id, only_type=Feature)
| conditional_block |
config.go | package config
import (
"encoding/json"
"fmt"
"io"
"net"
"os"
"strings"
cnitypes "github.com/containernetworking/cni/pkg/types"
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/imdario/mergo"
netutils "k8s.io/utils/net"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/logging"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"
)
// canonicalizeIP makes sure a provided ip is in standard form
func canonicalizeIP(ip *net.IP) error {
if ip.To4() != nil {
*ip = ip.To4()
return nil
} else if ip.To16() != nil {
*ip = ip.To16()
return nil
}
return fmt.Errorf("IP %s not v4 nor v6", *ip)
}
// LoadIPAMConfig creates IPAMConfig using json encoded configuration provided
// as `bytes`. At the moment values provided in envArgs are ignored so there
// is no possibility to overload the json configuration using envArgs
func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, string, error) {
var n types.Net
if err := json.Unmarshal(bytes, &n); err != nil {
return nil, "", fmt.Errorf("LoadIPAMConfig - JSON Parsing Error: %s / bytes: %s", err, bytes)
}
if n.IPAM == nil {
return nil, "", fmt.Errorf("IPAM config missing 'ipam' key")
} else if !isNetworkRelevant(n.IPAM) {
return nil, "", NewInvalidPluginError(n.IPAM.Type)
}
args := types.IPAMEnvArgs{}
if err := cnitypes.LoadArgs(envArgs, &args); err != nil {
return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err)
}
n.IPAM.PodName = string(args.K8S_POD_NAME)
n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE)
flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...)
if err != nil {
return nil, "", err
}
// Now let's try to merge the configurations...
// NB: Don't try to do any initialization before this point or it won't account for merged flat file.
var OverlappingRanges bool = n.IPAM.OverlappingRanges
if err := mergo.Merge(&n, flatipam); err != nil {
logging.Errorf("Merge error with flat file: %s", err)
}
n.IPAM.OverlappingRanges = OverlappingRanges
// Logging
if n.IPAM.LogFile != "" {
logging.SetLogFile(n.IPAM.LogFile)
}
if n.IPAM.LogLevel != "" {
logging.SetLogLevel(n.IPAM.LogLevel)
}
if foundflatfile != "" {
logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile)
}
if n.IPAM.Range != "" {
oldRange := types.RangeConfiguration{
OmitRanges: n.IPAM.OmitRanges,
Range: n.IPAM.Range,
RangeStart: n.IPAM.RangeStart,
RangeEnd: n.IPAM.RangeEnd,
}
n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...)
}
for idx := range n.IPAM.IPRanges {
if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 | else {
firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range)
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
if n.IPAM.IPRanges[idx].RangeStart == nil {
firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not set then pick the first network address
n.IPAM.IPRanges[idx].RangeStart = firstip
}
}
}
n.IPAM.OmitRanges = nil
n.IPAM.Range = ""
n.IPAM.RangeStart = nil
n.IPAM.RangeEnd = nil
if n.IPAM.Kubernetes.KubeConfigPath == "" {
return nil, "", storageError()
}
if n.IPAM.GatewayStr != "" {
gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr)
if gwip == nil {
return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr)
}
n.IPAM.Gateway = gwip
}
for i := range n.IPAM.OmitRanges {
_, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err)
}
}
if err := configureStatic(&n, args); err != nil {
return nil, "", err
}
if n.IPAM.LeaderLeaseDuration == 0 {
n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration
}
if n.IPAM.LeaderRenewDeadline == 0 {
n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline
}
if n.IPAM.LeaderRetryPeriod == 0 {
n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod
}
// Copy the net name into IPAM so as not to drag the Net struct around
n.IPAM.Name = n.Name
return n.IPAM, n.CNIVersion, nil
}
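// Illustrative input (an assumption, not taken from this repository's docs): a
// network config of the kind LoadIPAMConfig receives as `bytes`. Only the
// "type": "whereabouts" IPAM section matters here; the interface, range and
// kubeconfig values are placeholders.
//
//   {
//     "cniVersion": "0.3.1",
//     "name": "example-net",
//     "type": "macvlan",
//     "master": "eth0",
//     "ipam": {
//       "type": "whereabouts",
//       "range": "192.168.2.0/24",
//       "exclude": ["192.168.2.0/28"],
//       "kubernetes": {"kubeconfig": "/etc/cni/net.d/whereabouts.d/whereabouts.kubeconfig"}
//     }
//   }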
func pathExists(path string) bool {
_, err := os.Stat(path)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
return true
}
func configureStatic(n *types.Net, args types.IPAMEnvArgs) error {
// Validate all ranges
numV4 := 0
numV6 := 0
for i := range n.IPAM.Addresses {
ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr)
if err != nil {
return fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err)
}
n.IPAM.Addresses[i].Address = *addr
n.IPAM.Addresses[i].Address.IP = ip
if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil {
return fmt.Errorf("invalid address %d: %s", i, err)
}
if n.IPAM.Addresses[i].Address.IP.To4() != nil {
n.IPAM.Addresses[i].Version = "4"
numV4++
} else {
n.IPAM.Addresses[i].Version = "6"
numV6++
}
}
newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args)
if err != nil {
return err
}
numV4 = newnumV4
numV6 = newnumV6
// CNI spec 0.2.0 and below supported only one v4 and v6 address
if numV4 > 1 || numV6 > 1 {
for _, v := range types020.SupportedVersions {
if n.CNIVersion == v {
return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion)
}
}
}
return nil
}
func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) {
// Once we have our basics, let's look for our (optional) configuration file
confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"}
confdirs = append(confdirs, extraConfigPaths...)
// We prefix the optional configuration path (so we look there first)
if !isControlLoop && IPAM != nil {
if IPAM.ConfigurationPath != "" {
confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...)
}
}
// Cycle through the paths and parse the JSON config
flatipam := types.Net{}
foundflatfile := ""
for _, confpath := range confdirs {
if pathExists(confpath) {
jsonFile, err := os.Open(confpath)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err)
}
defer jsonFile.Close()
jsonBytes, err := io.ReadAll(jsonFile)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - io.ReadAll error: %s", confpath, err)
}
if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s", confpath, err, jsonBytes)
}
foundflatfile = confpath
return flatipam, foundflatfile, nil
}
}
return flatipam, foundflatfile, NewConfigFileNotFoundError()
}
func handleEnvArgs(n *types.Net, numV6 int, numV4 int, args types.IPAMEnvArgs) (int, int, error) {
if args.IP != "" {
for _, item := range strings.Split(string(args.IP), ",") {
ipstr := strings.TrimSpace(item)
ip, subnet, err := netutils.ParseCIDRSloppy(ipstr)
if err != nil {
return numV6, numV4, fmt.Errorf("invalid CIDR %s: %s", ipstr, err)
}
addr := types.Address{Address: net.IPNet{IP: ip, Mask: subnet.Mask}}
if addr.Address.IP.To4() != nil {
addr.Version = "4"
numV4++
} else {
addr.Version = "6"
numV6++
}
n.IPAM.Addresses = append(n.IPAM.Addresses, addr)
}
}
if args.GATEWAY != "" {
for _, item := range strings.Split(string(args.GATEWAY), ",") {
gwip := netutils.ParseIPSloppy(strings.TrimSpace(item))
if gwip == nil {
return numV6, numV4, fmt.Errorf("invalid gateway address: %s", item)
}
for i := range n.IPAM.Addresses {
if n.IPAM.Addresses[i].Address.Contains(gwip) {
n.IPAM.Addresses[i].Gateway = gwip
}
}
}
}
return numV6, numV4, nil
}
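// For context (inferred from the parsing above, not quoted from docs): a
// CNI_ARGS-style string handled by handleEnvArgs could look like
//
//   IP=10.0.0.5/24,2001:db8::5/64;GATEWAY=10.0.0.1
//
// Each comma-separated CIDR in IP becomes a static types.Address, and each
// GATEWAY entry is attached to whichever already-parsed address range contains it.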
func LoadIPAMConfiguration(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, error) {
pluginConfig, err := loadPluginConfig(bytes)
if err != nil {
return nil, err
}
if pluginConfig.Type == "" {
pluginConfigList, err := loadPluginConfigList(bytes)
if err != nil {
return nil, err
}
pluginConfigList.Plugins[0].CNIVersion = pluginConfig.CNIVersion
firstPluginBytes, err := json.Marshal(pluginConfigList.Plugins[0])
if err != nil {
return nil, err
}
ipamConfig, _, err := LoadIPAMConfig(firstPluginBytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
ipamConfig, _, err := LoadIPAMConfig(bytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
func loadPluginConfigList(bytes []byte) (*types.NetConfList, error) {
var netConfList types.NetConfList
if err := json.Unmarshal(bytes, &netConfList); err != nil {
return nil, err
}
return &netConfList, nil
}
func loadPluginConfig(bytes []byte) (*cnitypes.NetConf, error) {
var pluginConfig cnitypes.NetConf
if err := json.Unmarshal(bytes, &pluginConfig); err != nil {
return nil, err
}
return &pluginConfig, nil
}
func isNetworkRelevant(ipamConfig *types.IPAMConfig) bool {
const relevantIPAMType = "whereabouts"
return ipamConfig.Type == relevantIPAMType
}
type InvalidPluginError struct {
ipamType string
}
func NewInvalidPluginError(ipamType string) *InvalidPluginError {
return &InvalidPluginError{ipamType: ipamType}
}
func (e *InvalidPluginError) Error() string {
return fmt.Sprintf("only interested in networks whose IPAM type is 'whereabouts'. This one was: %s", e.ipamType)
}
type ConfigFileNotFoundError struct{}
func NewConfigFileNotFoundError() *ConfigFileNotFoundError {
return &ConfigFileNotFoundError{}
}
func (e *ConfigFileNotFoundError) Error() string {
return "config file not found"
}
func storageError() error {
return fmt.Errorf("you have not configured the storage engine (looks like you're using an invalid `kubernetes.kubeconfig` parameter in your config)")
}
| {
firstip := netutils.ParseIPSloppy(r[0])
if firstip == nil {
return nil, "", fmt.Errorf("invalid range start IP: %s", r[0])
}
lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) '%s': %s", r[1], err)
}
if !ipNet.Contains(firstip) {
return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
n.IPAM.IPRanges[idx].RangeStart = firstip
n.IPAM.IPRanges[idx].RangeEnd = lastip
} | conditional_block |
config.go | package config
import (
"encoding/json"
"fmt"
"io"
"net"
"os"
"strings"
cnitypes "github.com/containernetworking/cni/pkg/types"
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/imdario/mergo"
netutils "k8s.io/utils/net"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/logging"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"
)
// canonicalizeIP makes sure a provided ip is in standard form
func canonicalizeIP(ip *net.IP) error {
if ip.To4() != nil {
*ip = ip.To4()
return nil
} else if ip.To16() != nil {
*ip = ip.To16()
return nil | // as `bytes`. At the moment values provided in envArgs are ignored so there
// is no possibility to overload the json configuration using envArgs
func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, string, error) {
var n types.Net
if err := json.Unmarshal(bytes, &n); err != nil {
return nil, "", fmt.Errorf("LoadIPAMConfig - JSON Parsing Error: %s / bytes: %s", err, bytes)
}
if n.IPAM == nil {
return nil, "", fmt.Errorf("IPAM config missing 'ipam' key")
} else if !isNetworkRelevant(n.IPAM) {
return nil, "", NewInvalidPluginError(n.IPAM.Type)
}
args := types.IPAMEnvArgs{}
if err := cnitypes.LoadArgs(envArgs, &args); err != nil {
return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err)
}
n.IPAM.PodName = string(args.K8S_POD_NAME)
n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE)
flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...)
if err != nil {
return nil, "", err
}
// Now let's try to merge the configurations...
// NB: Don't try to do any initialization before this point or it won't account for merged flat file.
var OverlappingRanges bool = n.IPAM.OverlappingRanges
if err := mergo.Merge(&n, flatipam); err != nil {
logging.Errorf("Merge error with flat file: %s", err)
}
n.IPAM.OverlappingRanges = OverlappingRanges
// Logging
if n.IPAM.LogFile != "" {
logging.SetLogFile(n.IPAM.LogFile)
}
if n.IPAM.LogLevel != "" {
logging.SetLogLevel(n.IPAM.LogLevel)
}
if foundflatfile != "" {
logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile)
}
if n.IPAM.Range != "" {
oldRange := types.RangeConfiguration{
OmitRanges: n.IPAM.OmitRanges,
Range: n.IPAM.Range,
RangeStart: n.IPAM.RangeStart,
RangeEnd: n.IPAM.RangeEnd,
}
n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...)
}
for idx := range n.IPAM.IPRanges {
if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 {
firstip := netutils.ParseIPSloppy(r[0])
if firstip == nil {
return nil, "", fmt.Errorf("invalid range start IP: %s", r[0])
}
lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) '%s': %s", r[1], err)
}
if !ipNet.Contains(firstip) {
return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
n.IPAM.IPRanges[idx].RangeStart = firstip
n.IPAM.IPRanges[idx].RangeEnd = lastip
} else {
firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range)
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
if n.IPAM.IPRanges[idx].RangeStart == nil {
firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not set then pick the first network address
n.IPAM.IPRanges[idx].RangeStart = firstip
}
}
}
n.IPAM.OmitRanges = nil
n.IPAM.Range = ""
n.IPAM.RangeStart = nil
n.IPAM.RangeEnd = nil
if n.IPAM.Kubernetes.KubeConfigPath == "" {
return nil, "", storageError()
}
if n.IPAM.GatewayStr != "" {
gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr)
if gwip == nil {
return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr)
}
n.IPAM.Gateway = gwip
}
for i := range n.IPAM.OmitRanges {
_, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err)
}
}
if err := configureStatic(&n, args); err != nil {
return nil, "", err
}
if n.IPAM.LeaderLeaseDuration == 0 {
n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration
}
if n.IPAM.LeaderRenewDeadline == 0 {
n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline
}
if n.IPAM.LeaderRetryPeriod == 0 {
n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod
}
// Copy the net name into IPAM so as not to drag the Net struct around
n.IPAM.Name = n.Name
return n.IPAM, n.CNIVersion, nil
}
func pathExists(path string) bool {
_, err := os.Stat(path)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
return true
}
func configureStatic(n *types.Net, args types.IPAMEnvArgs) error {
// Validate all ranges
numV4 := 0
numV6 := 0
for i := range n.IPAM.Addresses {
ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr)
if err != nil {
return fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err)
}
n.IPAM.Addresses[i].Address = *addr
n.IPAM.Addresses[i].Address.IP = ip
if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil {
return fmt.Errorf("invalid address %d: %s", i, err)
}
if n.IPAM.Addresses[i].Address.IP.To4() != nil {
n.IPAM.Addresses[i].Version = "4"
numV4++
} else {
n.IPAM.Addresses[i].Version = "6"
numV6++
}
}
newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args)
if err != nil {
return err
}
numV4 = newnumV4
numV6 = newnumV6
// CNI spec 0.2.0 and below supported only one v4 and v6 address
if numV4 > 1 || numV6 > 1 {
for _, v := range types020.SupportedVersions {
if n.CNIVersion == v {
return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion)
}
}
}
return nil
}
func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) {
// Once we have our basics, let's look for our (optional) configuration file
confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"}
confdirs = append(confdirs, extraConfigPaths...)
// We prefix the optional configuration path (so we look there first)
if !isControlLoop && IPAM != nil {
if IPAM.ConfigurationPath != "" {
confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...)
}
}
// Cycle through the paths and parse the JSON config
flatipam := types.Net{}
foundflatfile := ""
for _, confpath := range confdirs {
if pathExists(confpath) {
jsonFile, err := os.Open(confpath)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err)
}
defer jsonFile.Close()
jsonBytes, err := io.ReadAll(jsonFile)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - io.ReadAll error: %s", confpath, err)
}
if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s", confpath, err, jsonBytes)
}
foundflatfile = confpath
return flatipam, foundflatfile, nil
}
}
return flatipam, foundflatfile, NewConfigFileNotFoundError()
}
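// Rough sketch of a flat whereabouts.conf that GetFlatIPAM would pick up and
// merge in as defaults; the keys mirror the IPAM section and are assumptions,
// not copied from the project documentation.
//
//   {
//     "datastore": "kubernetes",
//     "kubernetes": {"kubeconfig": "/etc/cni/net.d/whereabouts.d/whereabouts.kubeconfig"},
//     "log_file": "/tmp/whereabouts.log",
//     "log_level": "debug"
//   }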
func handleEnvArgs(n *types.Net, numV6 int, numV4 int, args types.IPAMEnvArgs) (int, int, error) {
if args.IP != "" {
for _, item := range strings.Split(string(args.IP), ",") {
ipstr := strings.TrimSpace(item)
ip, subnet, err := netutils.ParseCIDRSloppy(ipstr)
if err != nil {
return numV6, numV4, fmt.Errorf("invalid CIDR %s: %s", ipstr, err)
}
addr := types.Address{Address: net.IPNet{IP: ip, Mask: subnet.Mask}}
if addr.Address.IP.To4() != nil {
addr.Version = "4"
numV4++
} else {
addr.Version = "6"
numV6++
}
n.IPAM.Addresses = append(n.IPAM.Addresses, addr)
}
}
if args.GATEWAY != "" {
for _, item := range strings.Split(string(args.GATEWAY), ",") {
gwip := netutils.ParseIPSloppy(strings.TrimSpace(item))
if gwip == nil {
return numV6, numV4, fmt.Errorf("invalid gateway address: %s", item)
}
for i := range n.IPAM.Addresses {
if n.IPAM.Addresses[i].Address.Contains(gwip) {
n.IPAM.Addresses[i].Gateway = gwip
}
}
}
}
return numV6, numV4, nil
}
func LoadIPAMConfiguration(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, error) {
pluginConfig, err := loadPluginConfig(bytes)
if err != nil {
return nil, err
}
if pluginConfig.Type == "" {
pluginConfigList, err := loadPluginConfigList(bytes)
if err != nil {
return nil, err
}
pluginConfigList.Plugins[0].CNIVersion = pluginConfig.CNIVersion
firstPluginBytes, err := json.Marshal(pluginConfigList.Plugins[0])
if err != nil {
return nil, err
}
ipamConfig, _, err := LoadIPAMConfig(firstPluginBytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
ipamConfig, _, err := LoadIPAMConfig(bytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
func loadPluginConfigList(bytes []byte) (*types.NetConfList, error) {
var netConfList types.NetConfList
if err := json.Unmarshal(bytes, &netConfList); err != nil {
return nil, err
}
return &netConfList, nil
}
func loadPluginConfig(bytes []byte) (*cnitypes.NetConf, error) {
var pluginConfig cnitypes.NetConf
if err := json.Unmarshal(bytes, &pluginConfig); err != nil {
return nil, err
}
return &pluginConfig, nil
}
func isNetworkRelevant(ipamConfig *types.IPAMConfig) bool {
const relevantIPAMType = "whereabouts"
return ipamConfig.Type == relevantIPAMType
}
type InvalidPluginError struct {
ipamType string
}
func NewInvalidPluginError(ipamType string) *InvalidPluginError {
return &InvalidPluginError{ipamType: ipamType}
}
func (e *InvalidPluginError) Error() string {
return fmt.Sprintf("only interested in networks whose IPAM type is 'whereabouts'. This one was: %s", e.ipamType)
}
type ConfigFileNotFoundError struct{}
func NewConfigFileNotFoundError() *ConfigFileNotFoundError {
return &ConfigFileNotFoundError{}
}
func (e *ConfigFileNotFoundError) Error() string {
return "config file not found"
}
func storageError() error {
return fmt.Errorf("you have not configured the storage engine (looks like you're using an invalid `kubernetes.kubeconfig` parameter in your config)")
} | }
return fmt.Errorf("IP %s not v4 nor v6", *ip)
}
// LoadIPAMConfig creates IPAMConfig using json encoded configuration provided | random_line_split |
config.go | package config
import (
"encoding/json"
"fmt"
"io"
"net"
"os"
"strings"
cnitypes "github.com/containernetworking/cni/pkg/types"
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/imdario/mergo"
netutils "k8s.io/utils/net"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/logging"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"
)
// canonicalizeIP makes sure a provided ip is in standard form
func canonicalizeIP(ip *net.IP) error {
if ip.To4() != nil {
*ip = ip.To4()
return nil
} else if ip.To16() != nil {
*ip = ip.To16()
return nil
}
return fmt.Errorf("IP %s not v4 nor v6", *ip)
}
// LoadIPAMConfig creates IPAMConfig using json encoded configuration provided
// as `bytes`. At the moment values provided in envArgs are ignored so there
// is no possibility to overload the json configuration using envArgs
func | (bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, string, error) {
var n types.Net
if err := json.Unmarshal(bytes, &n); err != nil {
return nil, "", fmt.Errorf("LoadIPAMConfig - JSON Parsing Error: %s / bytes: %s", err, bytes)
}
if n.IPAM == nil {
return nil, "", fmt.Errorf("IPAM config missing 'ipam' key")
} else if !isNetworkRelevant(n.IPAM) {
return nil, "", NewInvalidPluginError(n.IPAM.Type)
}
args := types.IPAMEnvArgs{}
if err := cnitypes.LoadArgs(envArgs, &args); err != nil {
return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err)
}
n.IPAM.PodName = string(args.K8S_POD_NAME)
n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE)
flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...)
if err != nil {
return nil, "", err
}
// Now let's try to merge the configurations...
// NB: Don't try to do any initialization before this point or it won't account for merged flat file.
var OverlappingRanges bool = n.IPAM.OverlappingRanges
if err := mergo.Merge(&n, flatipam); err != nil {
logging.Errorf("Merge error with flat file: %s", err)
}
n.IPAM.OverlappingRanges = OverlappingRanges
// Logging
if n.IPAM.LogFile != "" {
logging.SetLogFile(n.IPAM.LogFile)
}
if n.IPAM.LogLevel != "" {
logging.SetLogLevel(n.IPAM.LogLevel)
}
if foundflatfile != "" {
logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile)
}
if n.IPAM.Range != "" {
oldRange := types.RangeConfiguration{
OmitRanges: n.IPAM.OmitRanges,
Range: n.IPAM.Range,
RangeStart: n.IPAM.RangeStart,
RangeEnd: n.IPAM.RangeEnd,
}
n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...)
}
for idx := range n.IPAM.IPRanges {
if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 {
firstip := netutils.ParseIPSloppy(r[0])
if firstip == nil {
return nil, "", fmt.Errorf("invalid range start IP: %s", r[0])
}
lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) '%s': %s", r[1], err)
}
if !ipNet.Contains(firstip) {
return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
n.IPAM.IPRanges[idx].RangeStart = firstip
n.IPAM.IPRanges[idx].RangeEnd = lastip
} else {
firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range)
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
if n.IPAM.IPRanges[idx].RangeStart == nil {
firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not set then pick the first network address
n.IPAM.IPRanges[idx].RangeStart = firstip
}
}
}
n.IPAM.OmitRanges = nil
n.IPAM.Range = ""
n.IPAM.RangeStart = nil
n.IPAM.RangeEnd = nil
if n.IPAM.Kubernetes.KubeConfigPath == "" {
return nil, "", storageError()
}
if n.IPAM.GatewayStr != "" {
gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr)
if gwip == nil {
return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr)
}
n.IPAM.Gateway = gwip
}
for i := range n.IPAM.OmitRanges {
_, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err)
}
}
if err := configureStatic(&n, args); err != nil {
return nil, "", err
}
if n.IPAM.LeaderLeaseDuration == 0 {
n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration
}
if n.IPAM.LeaderRenewDeadline == 0 {
n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline
}
if n.IPAM.LeaderRetryPeriod == 0 {
n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod
}
// Copy the net name into IPAM so as not to drag the Net struct around
n.IPAM.Name = n.Name
return n.IPAM, n.CNIVersion, nil
}
func pathExists(path string) bool {
_, err := os.Stat(path)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
return true
}
func configureStatic(n *types.Net, args types.IPAMEnvArgs) error {
// Validate all ranges
numV4 := 0
numV6 := 0
for i := range n.IPAM.Addresses {
ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr)
if err != nil {
return fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err)
}
n.IPAM.Addresses[i].Address = *addr
n.IPAM.Addresses[i].Address.IP = ip
if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil {
return fmt.Errorf("invalid address %d: %s", i, err)
}
if n.IPAM.Addresses[i].Address.IP.To4() != nil {
n.IPAM.Addresses[i].Version = "4"
numV4++
} else {
n.IPAM.Addresses[i].Version = "6"
numV6++
}
}
newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args)
if err != nil {
return err
}
numV4 = newnumV4
numV6 = newnumV6
// CNI spec 0.2.0 and below supported only one v4 and v6 address
if numV4 > 1 || numV6 > 1 {
for _, v := range types020.SupportedVersions {
if n.CNIVersion == v {
return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion)
}
}
}
return nil
}
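// Hypothetical "addresses" snippet (not from the original sources) of the kind
// configureStatic validates; one IPv4 plus one IPv6 entry stays within the
// one-address-per-family limit enforced for CNI spec versions 0.2.0 and below.
//
//   "ipam": {
//     "type": "whereabouts",
//     "addresses": [
//       {"address": "10.10.0.5/24", "gateway": "10.10.0.1"},
//       {"address": "2001:db8::5/64"}
//     ]
//   }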
func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) {
// Once we have our basics, let's look for our (optional) configuration file
confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"}
confdirs = append(confdirs, extraConfigPaths...)
// We prefix the optional configuration path (so we look there first)
if !isControlLoop && IPAM != nil {
if IPAM.ConfigurationPath != "" {
confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...)
}
}
// Cycle through the paths and parse the JSON config
flatipam := types.Net{}
foundflatfile := ""
for _, confpath := range confdirs {
if pathExists(confpath) {
jsonFile, err := os.Open(confpath)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err)
}
defer jsonFile.Close()
jsonBytes, err := io.ReadAll(jsonFile)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - io.ReadAll error: %s", confpath, err)
}
if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s", confpath, err, jsonBytes)
}
foundflatfile = confpath
return flatipam, foundflatfile, nil
}
}
return flatipam, foundflatfile, NewConfigFileNotFoundError()
}
func handleEnvArgs(n *types.Net, numV6 int, numV4 int, args types.IPAMEnvArgs) (int, int, error) {
if args.IP != "" {
for _, item := range strings.Split(string(args.IP), ",") {
ipstr := strings.TrimSpace(item)
ip, subnet, err := netutils.ParseCIDRSloppy(ipstr)
if err != nil {
return numV6, numV4, fmt.Errorf("invalid CIDR %s: %s", ipstr, err)
}
addr := types.Address{Address: net.IPNet{IP: ip, Mask: subnet.Mask}}
if addr.Address.IP.To4() != nil {
addr.Version = "4"
numV4++
} else {
addr.Version = "6"
numV6++
}
n.IPAM.Addresses = append(n.IPAM.Addresses, addr)
}
}
if args.GATEWAY != "" {
for _, item := range strings.Split(string(args.GATEWAY), ",") {
gwip := netutils.ParseIPSloppy(strings.TrimSpace(item))
if gwip == nil {
return numV6, numV4, fmt.Errorf("invalid gateway address: %s", item)
}
for i := range n.IPAM.Addresses {
if n.IPAM.Addresses[i].Address.Contains(gwip) {
n.IPAM.Addresses[i].Gateway = gwip
}
}
}
}
return numV6, numV4, nil
}
func LoadIPAMConfiguration(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, error) {
pluginConfig, err := loadPluginConfig(bytes)
if err != nil {
return nil, err
}
if pluginConfig.Type == "" {
pluginConfigList, err := loadPluginConfigList(bytes)
if err != nil {
return nil, err
}
pluginConfigList.Plugins[0].CNIVersion = pluginConfig.CNIVersion
firstPluginBytes, err := json.Marshal(pluginConfigList.Plugins[0])
if err != nil {
return nil, err
}
ipamConfig, _, err := LoadIPAMConfig(firstPluginBytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
ipamConfig, _, err := LoadIPAMConfig(bytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
func loadPluginConfigList(bytes []byte) (*types.NetConfList, error) {
var netConfList types.NetConfList
if err := json.Unmarshal(bytes, &netConfList); err != nil {
return nil, err
}
return &netConfList, nil
}
func loadPluginConfig(bytes []byte) (*cnitypes.NetConf, error) {
var pluginConfig cnitypes.NetConf
if err := json.Unmarshal(bytes, &pluginConfig); err != nil {
return nil, err
}
return &pluginConfig, nil
}
func isNetworkRelevant(ipamConfig *types.IPAMConfig) bool {
const relevantIPAMType = "whereabouts"
return ipamConfig.Type == relevantIPAMType
}
type InvalidPluginError struct {
ipamType string
}
func NewInvalidPluginError(ipamType string) *InvalidPluginError {
return &InvalidPluginError{ipamType: ipamType}
}
func (e *InvalidPluginError) Error() string {
return fmt.Sprintf("only interested in networks whose IPAM type is 'whereabouts'. This one was: %s", e.ipamType)
}
type ConfigFileNotFoundError struct{}
func NewConfigFileNotFoundError() *ConfigFileNotFoundError {
return &ConfigFileNotFoundError{}
}
func (e *ConfigFileNotFoundError) Error() string {
return "config file not found"
}
func storageError() error {
return fmt.Errorf("you have not configured the storage engine (looks like you're using an invalid `kubernetes.kubeconfig` parameter in your config)")
}
| LoadIPAMConfig | identifier_name |
config.go | package config
import (
"encoding/json"
"fmt"
"io"
"net"
"os"
"strings"
cnitypes "github.com/containernetworking/cni/pkg/types"
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/imdario/mergo"
netutils "k8s.io/utils/net"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/logging"
"github.com/k8snetworkplumbingwg/whereabouts/pkg/types"
)
// canonicalizeIP makes sure a provided ip is in standard form
func canonicalizeIP(ip *net.IP) error {
if ip.To4() != nil {
*ip = ip.To4()
return nil
} else if ip.To16() != nil {
*ip = ip.To16()
return nil
}
return fmt.Errorf("IP %s not v4 nor v6", *ip)
}
// LoadIPAMConfig creates IPAMConfig using json encoded configuration provided
// as `bytes`. At the moment values provided in envArgs are ignored so there
// is no possibility to overload the json configuration using envArgs
func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, string, error) |
func pathExists(path string) bool {
_, err := os.Stat(path)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
return true
}
func configureStatic(n *types.Net, args types.IPAMEnvArgs) error {
// Validate all ranges
numV4 := 0
numV6 := 0
for i := range n.IPAM.Addresses {
ip, addr, err := netutils.ParseCIDRSloppy(n.IPAM.Addresses[i].AddressStr)
if err != nil {
return fmt.Errorf("invalid CIDR in addresses %s: %s", n.IPAM.Addresses[i].AddressStr, err)
}
n.IPAM.Addresses[i].Address = *addr
n.IPAM.Addresses[i].Address.IP = ip
if err := canonicalizeIP(&n.IPAM.Addresses[i].Address.IP); err != nil {
return fmt.Errorf("invalid address %d: %s", i, err)
}
if n.IPAM.Addresses[i].Address.IP.To4() != nil {
n.IPAM.Addresses[i].Version = "4"
numV4++
} else {
n.IPAM.Addresses[i].Version = "6"
numV6++
}
}
newnumV6, newnumV4, err := handleEnvArgs(n, numV6, numV4, args)
if err != nil {
return err
}
numV4 = newnumV4
numV6 = newnumV6
// CNI spec 0.2.0 and below supported only one v4 and v6 address
if numV4 > 1 || numV6 > 1 {
for _, v := range types020.SupportedVersions {
if n.CNIVersion == v {
return fmt.Errorf("CNI version %v does not support more than 1 address per family", n.CNIVersion)
}
}
}
return nil
}
func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) {
// Once we have our basics, let's look for our (optional) configuration file
confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"}
confdirs = append(confdirs, extraConfigPaths...)
// We prefix the optional configuration path (so we look there first)
if !isControlLoop && IPAM != nil {
if IPAM.ConfigurationPath != "" {
confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...)
}
}
// Cycle through the paths and parse the JSON config
flatipam := types.Net{}
foundflatfile := ""
for _, confpath := range confdirs {
if pathExists(confpath) {
jsonFile, err := os.Open(confpath)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err)
}
defer jsonFile.Close()
jsonBytes, err := io.ReadAll(jsonFile)
if err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - io.ReadAll error: %s", confpath, err)
}
if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil {
return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s", confpath, err, jsonBytes)
}
foundflatfile = confpath
return flatipam, foundflatfile, nil
}
}
return flatipam, foundflatfile, NewConfigFileNotFoundError()
}
func handleEnvArgs(n *types.Net, numV6 int, numV4 int, args types.IPAMEnvArgs) (int, int, error) {
if args.IP != "" {
for _, item := range strings.Split(string(args.IP), ",") {
ipstr := strings.TrimSpace(item)
ip, subnet, err := netutils.ParseCIDRSloppy(ipstr)
if err != nil {
return numV6, numV4, fmt.Errorf("invalid CIDR %s: %s", ipstr, err)
}
addr := types.Address{Address: net.IPNet{IP: ip, Mask: subnet.Mask}}
if addr.Address.IP.To4() != nil {
addr.Version = "4"
numV4++
} else {
addr.Version = "6"
numV6++
}
n.IPAM.Addresses = append(n.IPAM.Addresses, addr)
}
}
if args.GATEWAY != "" {
for _, item := range strings.Split(string(args.GATEWAY), ",") {
gwip := netutils.ParseIPSloppy(strings.TrimSpace(item))
if gwip == nil {
return numV6, numV4, fmt.Errorf("invalid gateway address: %s", item)
}
for i := range n.IPAM.Addresses {
if n.IPAM.Addresses[i].Address.Contains(gwip) {
n.IPAM.Addresses[i].Gateway = gwip
}
}
}
}
return numV6, numV4, nil
}
func LoadIPAMConfiguration(bytes []byte, envArgs string, extraConfigPaths ...string) (*types.IPAMConfig, error) {
pluginConfig, err := loadPluginConfig(bytes)
if err != nil {
return nil, err
}
if pluginConfig.Type == "" {
pluginConfigList, err := loadPluginConfigList(bytes)
if err != nil {
return nil, err
}
pluginConfigList.Plugins[0].CNIVersion = pluginConfig.CNIVersion
firstPluginBytes, err := json.Marshal(pluginConfigList.Plugins[0])
if err != nil {
return nil, err
}
ipamConfig, _, err := LoadIPAMConfig(firstPluginBytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
ipamConfig, _, err := LoadIPAMConfig(bytes, envArgs, extraConfigPaths...)
if err != nil {
return nil, err
}
return ipamConfig, nil
}
func loadPluginConfigList(bytes []byte) (*types.NetConfList, error) {
var netConfList types.NetConfList
if err := json.Unmarshal(bytes, &netConfList); err != nil {
return nil, err
}
return &netConfList, nil
}
func loadPluginConfig(bytes []byte) (*cnitypes.NetConf, error) {
var pluginConfig cnitypes.NetConf
if err := json.Unmarshal(bytes, &pluginConfig); err != nil {
return nil, err
}
return &pluginConfig, nil
}
func isNetworkRelevant(ipamConfig *types.IPAMConfig) bool {
const relevantIPAMType = "whereabouts"
return ipamConfig.Type == relevantIPAMType
}
type InvalidPluginError struct {
ipamType string
}
func NewInvalidPluginError(ipamType string) *InvalidPluginError {
return &InvalidPluginError{ipamType: ipamType}
}
func (e *InvalidPluginError) Error() string {
return fmt.Sprintf("only interested in networks whose IPAM type is 'whereabouts'. This one was: %s", e.ipamType)
}
type ConfigFileNotFoundError struct{}
func NewConfigFileNotFoundError() *ConfigFileNotFoundError {
return &ConfigFileNotFoundError{}
}
func (e *ConfigFileNotFoundError) Error() string {
return "config file not found"
}
func storageError() error {
return fmt.Errorf("you have not configured the storage engine (looks like you're using an invalid `kubernetes.kubeconfig` parameter in your config)")
}
| {
var n types.Net
if err := json.Unmarshal(bytes, &n); err != nil {
return nil, "", fmt.Errorf("LoadIPAMConfig - JSON Parsing Error: %s / bytes: %s", err, bytes)
}
if n.IPAM == nil {
return nil, "", fmt.Errorf("IPAM config missing 'ipam' key")
} else if !isNetworkRelevant(n.IPAM) {
return nil, "", NewInvalidPluginError(n.IPAM.Type)
}
args := types.IPAMEnvArgs{}
if err := cnitypes.LoadArgs(envArgs, &args); err != nil {
return nil, "", fmt.Errorf("LoadArgs - CNI Args Parsing Error: %s", err)
}
n.IPAM.PodName = string(args.K8S_POD_NAME)
n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE)
flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...)
if err != nil {
return nil, "", err
}
// Now let's try to merge the configurations...
// NB: Don't try to do any initialization before this point or it won't account for merged flat file.
var OverlappingRanges bool = n.IPAM.OverlappingRanges
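// mergo.Merge only fills fields that are still empty in n, so explicit CNI config values win over the flat-file defaults.
// OverlappingRanges is saved and restored around the merge because false counts as empty and would otherwise be overwritten.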
if err := mergo.Merge(&n, flatipam); err != nil {
logging.Errorf("Merge error with flat file: %s", err)
}
n.IPAM.OverlappingRanges = OverlappingRanges
// Logging
if n.IPAM.LogFile != "" {
logging.SetLogFile(n.IPAM.LogFile)
}
if n.IPAM.LogLevel != "" {
logging.SetLogLevel(n.IPAM.LogLevel)
}
if foundflatfile != "" {
logging.Debugf("Used defaults from parsed flat file config @ %s", foundflatfile)
}
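// Fold the legacy single-range fields (Range/RangeStart/RangeEnd) into the IPRanges list so the rest of the code only has to handle the list form.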
if n.IPAM.Range != "" {
oldRange := types.RangeConfiguration{
OmitRanges: n.IPAM.OmitRanges,
Range: n.IPAM.Range,
RangeStart: n.IPAM.RangeStart,
RangeEnd: n.IPAM.RangeEnd,
}
n.IPAM.IPRanges = append([]types.RangeConfiguration{oldRange}, n.IPAM.IPRanges...)
}
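// Each range entry is either a bare CIDR or the "<first-IP>-<CIDR>" form; normalize both to a canonical CIDR plus an explicit range start (and end, when given).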
for idx := range n.IPAM.IPRanges {
if r := strings.SplitN(n.IPAM.IPRanges[idx].Range, "-", 2); len(r) == 2 {
firstip := netutils.ParseIPSloppy(r[0])
if firstip == nil {
return nil, "", fmt.Errorf("invalid range start IP: %s", r[0])
}
lastip, ipNet, err := netutils.ParseCIDRSloppy(r[1])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR (do you have the 'range' parameter set for Whereabouts?) '%s': %s", r[1], err)
}
if !ipNet.Contains(firstip) {
return nil, "", fmt.Errorf("invalid range start for CIDR %s: %s", ipNet.String(), firstip)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
n.IPAM.IPRanges[idx].RangeStart = firstip
n.IPAM.IPRanges[idx].RangeEnd = lastip
} else {
firstip, ipNet, err := netutils.ParseCIDRSloppy(n.IPAM.IPRanges[idx].Range)
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR %s: %s", n.IPAM.IPRanges[idx].Range, err)
}
n.IPAM.IPRanges[idx].Range = ipNet.String()
if n.IPAM.IPRanges[idx].RangeStart == nil {
firstip = netutils.ParseIPSloppy(firstip.Mask(ipNet.Mask).String()) // if range_start is not set, pick the first address of the network
n.IPAM.IPRanges[idx].RangeStart = firstip
}
}
}
n.IPAM.OmitRanges = nil
n.IPAM.Range = ""
n.IPAM.RangeStart = nil
n.IPAM.RangeEnd = nil
if n.IPAM.Kubernetes.KubeConfigPath == "" {
return nil, "", storageError()
}
if n.IPAM.GatewayStr != "" {
gwip := netutils.ParseIPSloppy(n.IPAM.GatewayStr)
if gwip == nil {
return nil, "", fmt.Errorf("couldn't parse gateway IP: %s", n.IPAM.GatewayStr)
}
n.IPAM.Gateway = gwip
}
for i := range n.IPAM.OmitRanges {
_, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i])
if err != nil {
return nil, "", fmt.Errorf("invalid CIDR in exclude list %s: %s", n.IPAM.OmitRanges[i], err)
}
}
if err := configureStatic(&n, args); err != nil {
return nil, "", err
}
if n.IPAM.LeaderLeaseDuration == 0 {
n.IPAM.LeaderLeaseDuration = types.DefaultLeaderLeaseDuration
}
if n.IPAM.LeaderRenewDeadline == 0 {
n.IPAM.LeaderRenewDeadline = types.DefaultLeaderRenewDeadline
}
if n.IPAM.LeaderRetryPeriod == 0 {
n.IPAM.LeaderRetryPeriod = types.DefaultLeaderRetryPeriod
}
// Copy the net name into IPAM so we don't have to drag the Net struct around
n.IPAM.Name = n.Name
return n.IPAM, n.CNIVersion, nil
} | identifier_body |
ontology.js | /*
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2010, Qiime Web Analysis"
__credits__ = ["Jesse Stombaugh", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.0.0.dev"
__maintainer__ = ["Jesse Stombaugh"]
__email__ = "[email protected]"
__status__ = "Production"
*/
var xmlhttp
var geocoder;
var map;
var marker;
var latitude=new Array();
var longitude=new Array();
var elevation=new Array();
var markersArray = [];
var infoWindowArray = [];
/*This changes the color of the table background when a user mouses over the Tool buttons. */
function mouseover(key){
cell=document.getElementById(key);
if (cell.bgcolor=='black'){
cell.bgcolor='blue';
}else{
cell.bgcolor='black';
}
}
/* These two function turn on/off the visibility of the two tools */
function displayOntology() |
function displayGeography(){
document.getElementById("ontology_lookup").style.display='none';
document.getElementById("geographic_location").style.display='';
document.getElementById("map_canvas").style.visibility='visible';
}
/* Initializes the Google Map. */
function initialize(){
geocoder = new google.maps.Geocoder();
var latlng = new google.maps.LatLng(0,0);
var myOptions = {
zoom: 1,
center: latlng,
mapTypeId: google.maps.MapTypeId.ROADMAP
}
map = new google.maps.Map(document.getElementById("map_canvas"), myOptions);
}
/* Removes the overlays from the map, but keeps them in the array*
//not used, since we are re-initializing the map
function clearOverlays() {
if (markersArray) {
for (i in markersArray) {
markersArray[i].setMap(null)
//infoWindowArray[i].close()
}
markersArray.length = 0;
//infoWindowArray.length = 0;
//markersArray=new Array();
}
}
*/
var elevator = new google.maps.ElevationService();
/* This function preps the addresses and calls the geocoder */
function codeAddress() {
//we reinitialize each time this is called, so it recenters on the world
//I did this since it is difficult to zoom based on the lat/lngs
initialize();
//get the locations from the input box
var address = document.getElementById("address").value;
//convert the input box into an array
address_array=convert_terms_to_array(address)
//iterate over the addresses and append the "loc:" tag to the beginning
//which overwrites google point of interest detector
saved_address_array=new Array();
for (var i=0; i<address_array.length-1; i++){
if (address_array[i] != ''){
address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
saved_address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
}else if (address_array[i] == '' && address_array[i-1]!=''){
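// a blank input line inherits the previous address so the output rows stay aligned with the input rows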
address_array[i]=address_array[i-1]
saved_address_array[i]=address_array[i-1]
}else{
saved_address_array[i]=address_array[i]
}
}
//get a unique list of the address
unique_addresses=unique(address_array)
//no longer needed since we are re-initializing
//clearOverlays();
latitude=new Array();
longitude=new Array();
elevation=new Array();
var latlong
var iterator=0;
timer_ms=0;
if (geocoder) {
//give status updates
document.getElementById("loading_status").innerHTML='Loading coordinates'
//iterate over the addresses and append a timing event, since google
//has a query limit per second
for (var i=0; i<unique_addresses.length; i++){
if (unique_addresses[i]!=''){
var lat2=setTimeout('geocode_results('+i+')',timer_ms)
timer_ms+=700
}
}
//append to the status after all points should have loaded
setTimeout("document.getElementById('loading_status').innerHTML='Completed'",timer_ms)
}
}
/* This function gets the Lat/Long using Google Maps Geocoder API. */
function geocode_results(i){
//query google maps for lat/lngs
geocoder.geocode( { 'address': unique_addresses[i]}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
var latlong = new google.maps.LatLng(results[0].geometry.location.lat(),results[0].geometry.location.lng());
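// MarkerImage arguments: icon URL, displayed size, sprite origin, and anchor point (the tip of the pin)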
var pinImage = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_letter&chld= |" + 'FF0000',
new google.maps.Size(21, 34),
new google.maps.Point(0,0),
new google.maps.Point(10, 34));
var pinShadow = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_shadow",
new google.maps.Size(40, 37),
new google.maps.Point(0, 0),
new google.maps.Point(12, 35));
/* This function gets the Elevation using Google Maps Elevations API. */
elevator.getElevationForLocations({'locations':[latlong]}, function(results2, status2){
if (status2 == google.maps.ElevationStatus.OK) {
// Retrieve the first result
if (results2[0]) {
//assign lat/lng/elev to arrays
latitude[unique_addresses[i]]=results2[0].location.lat()
longitude[unique_addresses[i]]=results2[0].location.lng()
elevation[unique_addresses[i]]=results2[0].elevation;
//put a pointer on the map
markersArray[unique_addresses[i]] = new google.maps.Marker({
position: latlong,
map: map,
color: '#FF0000',
clickable: false,
icon: pinImage,
shadow: pinShadow
});
} else {
alert("No elevation results found!");
}
}
});
}else{
alert(status)
alert("Unable to find the Location you specified!");
}
})
}
/* This function outputs the Lat/Long/Elev to the Console. */
function output_latlong(){
//generate the output content
type=document.getElementById('latlngType').value
var content='';
for (var i=0; i<saved_address_array.length; i++) {
if (type=='Latitude'){
content=content+latitude[saved_address_array[i]]+'<br>';
}else if (type=='Longitude'){
content=content+longitude[saved_address_array[i]]+'<br>';
}else if (type=='Elevation'){
content=content+elevation[saved_address_array[i]]+'<br>';
}
}
//write page
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This is the AJAX function which produces the list of terms below each input
box. It takes as input:
1) the ontology select box id
2) the query string
3) the input box id
4) the txt box below input id
*/
function showResult(ont_id,str,inputbox_id,txt_id)
{
// If the substring length is empty, then do nothing
if (str.length==0)
{
return;
}
// If the substring is at least one in length, then search for similar terms
// in the ontologies selected. This is where we can set the length to start
//searches (i.e. after 3 letters are present).
else if (str.length>0){
//remove text or checkmark next to the input box and change font color
//to black
document.getElementById('valid'+inputbox_id).innerHTML="";
document.getElementById('valid'+inputbox_id).style.color="black";
xmlhttp=GetXmlHttpObject()
//check if browser can perform xmlhttp
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
//get the list of ontologies using the ontology id
ont_list=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ont_list)
//generate a url string where we pass our variables
var url="ontology_search.psp";
url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4){
//write the list of similar terms from the database
document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText;
document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2";
xmlhttp.close();
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
}
}
/*
This is the AJAX function which validates the terms in each input
box. It takes as input:
1) the ontology select box id
2) the initial list of ontology terms from user
3) the table where all results should be written
4) whether this is the first call of this function
5) whether this is an export call
*/
function validateInput(ont_id,ont_term_list,table_id,new_data,export_data)
{
//if no data is input produce an alert
if (ont_term_list.length==0){
alert("Paste some data in the input box!");
return;
}
//get the list of ontologies using the ontology id
ontologies=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ontologies)
//if no ontology is selected produce an alert
if (selected_ont==''){
alert("Select at least one Ontology!")
return;
}
//take the pasted terms from user and convert those terms to an array
ont_term_array=convert_terms_to_array(ont_term_list);
//save this original list of terms from the user into an array
original_ont_term_array=ont_term_array;
//create an array to store the terms from the input boxes as they are being
//modified
updated_unique_terms=new Array();
//if this is the first call to this function, create a unique list of terms
//build the input boxes
if (new_data == 'True')
{
//original_ont_term_array=new Array();
original_unique_terms=new Array();
//remove old input boxes, so the user can re-use the app over and over
clear_inputs(table_id)
//generate unique list and input boxes
unique_ont_array=write_input_boxes(ont_term_array,table_id);
//store unique ontology terms for later use
original_unique_terms=unique_ont_array;
updated_unique_terms=unique_ont_array;
}
//If this is not the first call, retrieve values from input boxes
else
{
//get the values from the input boxes
unique_ont_array=get_inputs(unique_ont_array);
updated_unique_terms=unique_ont_array;
}
//check if browser can perform xmlhttp
xmlhttp=GetXmlHttpObject()
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
/*
var url="ontology_validate.psp";
url=url+"?ont_id="+selected_ont+"&ont_terms="+unique_ont_array;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4)
{
//since the response from the PL/SQL is a string using the '#'
//delimiter, we need to split it and write the results to the table
validity=xmlhttp.responseText.split('#')
for (var i=0; i<validity.length;i++){
//determine if an input value is valid and write 'Invalid' or a
//checkbox accordingly
if (validity[i]=='Valid' || validity[i]=='Valid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML='✓';
document.getElementById('validtxtbox'+(i)).style.color="green";
}else if (validity[i]=='Invalid' || validity[i]=='Invalid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML=validity[i];
document.getElementById('validtxtbox'+(i)).style.color="red";
}
}
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
*/
//If the data is supposed to be exported, write the data to the new window
if (export_data=='True'){
write_data_to_new_window(original_ont_term_array,original_unique_terms,updated_unique_terms);
}
}
function GetXmlHttpObject()
{
if (window.XMLHttpRequest)
{
// code for IE7+, Firefox, Chrome, Opera, Safari
return new XMLHttpRequest();
}
if (window.ActiveXObject)
{
// code for IE6, IE5
return new ActiveXObject("Microsoft.XMLHTTP");
}
return null;
}
/*
This function changes the input box value when the user clicks on a term
in the list of terms
*/
function change_form_value(form_field,form_value,inputbox_id){
//change the input box value
document.getElementById(form_field).value=form_value;
//Clear the list of ontology terms
document.getElementById('input'+inputbox_id).innerHTML='';
document.getElementById('input'+inputbox_id).style.border="0px";
//Add a checkmark next to the input box
document.getElementById('valid'+form_field).innerHTML='✓';
document.getElementById('valid'+form_field).style.color="green";
}
/*
when iterating through list of ontology terms, upon onfocus, this changes
the background to cyan
*/
function setStyle(x)
{
document.getElementById(x).style.background="cyan"
}
/*
when iterating through list of ontology terms, when removing focus (onblur),
this changes the background back to white
*/
function removeStyle(x)
{
document.getElementById(x).style.background="white"
}
/*
This function checks to see if all input boxes are valid, updates the original
list of terms from the user, with the corrected terms, then calls the
function to write the data to the new window
*/
function write_data_to_new_window(original_ont_term_array, original_unique_terms, updated_unique_terms){
//Determine that all terms are valid
for (var i=0;i<original_unique_terms.length;i++){
if (original_unique_terms[i]!=''){
validity=document.getElementById('validtxtbox'+(i)).innerHTML
if ( validity=='' || validity=='Click Input Box...'){
alert('You need to choose valid terms!');
return;
}else if (validity=='Invalid' || validity=='Invalid\n'){
alert('You have invalid terms!');
return;
}
}
}
//generate a new array with update terms based on the valid input boxes
output_array=new Array();
//using length-1 since we appended an empty element to the list in the
//convert_terms_to_array function.
for (var j=0;j<original_ont_term_array.length-1;j++){
for (var k=0;k<original_unique_terms.length;k++){
if (original_ont_term_array[j]==original_unique_terms[k]){
output_array.push(updated_unique_terms[k]);
}
}
if(original_ont_term_array[j]=='' && j!=0){
output_array.push(output_array[j-1]);
}else if(original_ont_term_array[j]=='' && j==0){
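// a blank first line has no previous value to inherit, so emit a placeholder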
output_array.push('n/a');
}
}
//write the array to the new window
writeConsole(output_array.join('<br>'));
}
/*
This function creates a new console window and writes an html page containing
the corrected list of terms
*/
function writeConsole(content)
{
//open new window
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
//write page
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This function gets the terms from the input boxes and puts them into an array
*/
function get_inputs(unique_ont_array){
new_unique_ont_array= new Array();
for (var i = 0; i<unique_ont_array.length;i++){
if (unique_ont_array[i]!=''){
new_unique_ont_array[i]=document.getElementById('txtbox'+i).value;
}
}
return new_unique_ont_array;
}
/*
This function gets a list of selected ontologies, concatenates them and formats
them as a string to be used by PL/SQL
*/
function get_selected(selObject){
var arSelected = new Array();
for (i=0;i<selObject.options.length;i++){
if (selObject.options[i].selected==true){
arSelected.push('\''+selObject.options[i].value+'\'');
};
}
onts=arSelected.join(',');
return onts
}
/*
This function takes an array and produces another array with only unique values
*/
function unique(a){
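//removes later duplicates in place using a case-insensitive comparison, then filters out empty strings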
for( var i=a.length; --i>-1; ) {
for( var j=a.length; --j>-1; )
{
//must convert to uppercase for comparison, so there are no
//case-sensitivity issues
if(i != j && a[i].toUpperCase() == a[j].toUpperCase() && a[j]!='') a.splice(i,1);
}
}
//filter out the empty strings
var unique_terms=new Array();
for (var k=0;k<a.length;k++){
if (a[k] != ''){
unique_terms.push(a[k]);
}
}
return unique_terms;
}
/*
This function clears the result table, so the user can perform many searches
*/
function clear_inputs(table_id){
for(var i = document.getElementById(table_id).rows.length; i > 0;i--)
{
document.getElementById(table_id).deleteRow(i -1);
}
}
/*
This function checks that text was typed in the input textarea and then takes
that list and converts the string into an array based on new-line character
*/
function convert_terms_to_array(ont_term_list){
ont_term_array=ont_term_list.split('\n')
//need to add another element to array, since there may not be a new line
//at the end of the list
ont_term_array.push('');
if (ont_term_array=='')
{
alert("Input a list of terms!")
return;
}
filtered_array=new Array();
for (var i=0;i<ont_term_array.length;i++){
filtered_array.push(ont_term_array[i])
/* if (ont_term_array[i]!=''){
filtered_array.push(ont_term_array[i])
}*/
}
return filtered_array;
}
/*
This function writes the input boxes into the results table. Each input box has
4 fields that are created as follows:
================== ======================
input-box-validity input-box
------------------ ----------------------
empty field list-of-ontology-terms
================== ======================
...
*/
function write_input_boxes(ont_term_array,table_id){
//create the header fields in the table (2 columns)
unique_ont_array=unique(ont_term_array);
var otable = document.getElementById(table_id);
var row1 = document.createElement("TR");
var th1 = document.createElement("TH");
th1.appendChild(document.createTextNode("Valid:"));
row1.appendChild(th1);
var th2 = document.createElement("TH");
th2.appendChild(document.createTextNode("Distinct Terms from Input List:"));
row1.appendChild(th2);
otable.appendChild(row1);
for (var i = 0; i<unique_ont_array.length;i++){
//make sure the array field is not empty, which happens when performing
//javascript split() and join() functions
if (unique_ont_array[i]!=''){
//create the input-box-validity field
var row2 = document.createElement("TR");
var td1 = document.createElement("TD");
td1.id='validtxtbox'+i;
td1.style.width='130px';
td1.appendChild(document.createTextNode('Click Input Box...'));
row2.appendChild(td1);
//create the input-box field
var td2 = document.createElement("TD");
var input1 = document.createElement("input");
input1.value=unique_ont_array[i];
input1.id='txtbox'+i;
input1.setAttribute('size','50');
input1.setAttribute('onkeyup',"showResult('ontologies',this.value,this.id,this.id)");
input1.setAttribute('onclick',"showResult('ontologies',this.value,this.id,this.id)");
td2.appendChild(input1);
row2.appendChild(td2);
otable.appendChild(row2);
//create the empty field
var row3 = document.createElement("TR");
var td3 = document.createElement("TD");
row3.appendChild(td3);
//create the list-of-ontology-terms field
var td4 = document.createElement("TD");
td4.id='input'+input1.id;
row3.appendChild(td4);
otable.appendChild(row3);
}
}
return unique_ont_array;
}
//this function resets the default select option and is for the select box
//above the ontology select field
function reset_select(selObject){
for (i=0;i<selObject.options.length;i++){
selObject.options[i].selected=false;
}
}
//this function selects all from the select options and is for the select box
//above the ontology select field
function select_all(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=true;
}
}
//this function selects none from the select options and is for the select box
//above the ontology select field
function select_none(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=false;
}
}
//this function inverts the selection from the select options and is for the
//select box above the ontology select field
function select_invert(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
if (listbox_values.options[i].selected==true){
listbox_values.options[i].selected=false;
}else{
listbox_values.options[i].selected=true;
}
}
}
| {
document.getElementById("ontology_lookup").style.display='';
document.getElementById("geographic_location").style.display='none';
document.getElementById("map_canvas").style.visibility='hidden';
} | identifier_body |
ontology.js | /*
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2010, Qiime Web Analysis"
__credits__ = ["Jesse Stombaugh", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.0.0.dev"
__maintainer__ = ["Jesse Stombaugh"]
__email__ = "[email protected]"
__status__ = "Production"
*/
var xmlhttp
var geocoder;
var map;
var marker;
var latitude=new Array();
var longitude=new Array();
var elevation=new Array();
var markersArray = [];
var infoWindowArray = [];
/*This changes the color of the table background when a user mouses over the Tool buttons. */
function mouseover(key){
cell=document.getElementById(key);
if (cell.bgcolor=='black'){
cell.bgcolor='blue';
}else{
cell.bgcolor='black';
}
}
/* These two function turn on/off the visibility of the two tools */
function displayOntology(){
document.getElementById("ontology_lookup").style.display='';
document.getElementById("geographic_location").style.display='none';
document.getElementById("map_canvas").style.visibility='hidden';
}
function displayGeography(){
document.getElementById("ontology_lookup").style.display='none';
document.getElementById("geographic_location").style.display='';
document.getElementById("map_canvas").style.visibility='visible';
}
/* Initializes the Google Map. */
function initialize(){
geocoder = new google.maps.Geocoder();
var latlng = new google.maps.LatLng(0,0);
var myOptions = {
zoom: 1,
center: latlng,
mapTypeId: google.maps.MapTypeId.ROADMAP
}
map = new google.maps.Map(document.getElementById("map_canvas"), myOptions);
}
/* Removes the overlays from the map, but keeps them in the array*
//not used, since we are re-initializing the map
function clearOverlays() {
if (markersArray) {
for (i in markersArray) {
markersArray[i].setMap(null)
//infoWindowArray[i].close()
}
markersArray.length = 0;
//infoWindowArray.length = 0;
//markersArray=new Array();
}
}
*/
var elevator = new google.maps.ElevationService();
/* This function preps the addresses and calls the geocoder */
function codeAddress() {
//we reinitialize each time this is called, so it recenters on the world
//I did this since it is difficult to zoom based on the lat/lngs
initialize();
//get the locations from the input box
var address = document.getElementById("address").value;
//convert the input box into an array
address_array=convert_terms_to_array(address)
//iterate over the addresses and append the "loc:" tag to the beginning
//which overwrites google point of interest detector
saved_address_array=new Array();
for (var i=0; i<address_array.length-1; i++){
if (address_array[i] != ''){
address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
saved_address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
}else if (address_array[i] == '' && address_array[i-1]!=''){
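// a blank input line inherits the previous address so the output rows stay aligned with the input rows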
address_array[i]=address_array[i-1]
saved_address_array[i]=address_array[i-1]
}else{
saved_address_array[i]=address_array[i]
}
}
//get a unique list of the address
unique_addresses=unique(address_array)
//no longer needed since we are re-initializing
//clearOverlays();
latitude=new Array();
longitude=new Array();
elevation=new Array();
var latlong
var iterator=0;
timer_ms=0;
if (geocoder) {
//give status updates
document.getElementById("loading_status").innerHTML='Loading coordinates'
//iterate over the addresses and append a timing event, since google
//has a query limit per second
for (var i=0; i<unique_addresses.length; i++){
if (unique_addresses[i]!=''){
var lat2=setTimeout('geocode_results('+i+')',timer_ms)
timer_ms+=700
}
}
//append to the status after all points should have loaded
setTimeout("document.getElementById('loading_status').innerHTML='Completed'",timer_ms)
}
}
/* This function gets the Lat/Long using Google Maps Geocoder API. */
function geocode_results(i){
//query google maps for lat/lngs
geocoder.geocode( { 'address': unique_addresses[i]}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
var latlong = new google.maps.LatLng(results[0].geometry.location.lat(),results[0].geometry.location.lng());
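// MarkerImage arguments: icon URL, displayed size, sprite origin, and anchor point (the tip of the pin)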
var pinImage = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_letter&chld= |" + 'FF0000',
new google.maps.Size(21, 34),
new google.maps.Point(0,0),
new google.maps.Point(10, 34));
var pinShadow = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_shadow",
new google.maps.Size(40, 37),
new google.maps.Point(0, 0),
new google.maps.Point(12, 35));
/* This function gets the Elevation using Google Maps Elevations API. */
elevator.getElevationForLocations({'locations':[latlong]}, function(results2, status2){
if (status2 == google.maps.ElevationStatus.OK) {
// Retrieve the first result
if (results2[0]) {
//assign lat/lng/elev to arrays
latitude[unique_addresses[i]]=results2[0].location.lat()
longitude[unique_addresses[i]]=results2[0].location.lng()
elevation[unique_addresses[i]]=results2[0].elevation;
//put a pointer on the map
markersArray[unique_addresses[i]] = new google.maps.Marker({
position: latlong,
map: map,
color: '#FF0000',
clickable: false,
icon: pinImage,
shadow: pinShadow
});
} else {
alert("No elevation results found!");
}
}
});
}else{
alert(status)
alert("Unable to find the Location you specified!");
}
})
}
/* This function outputs the Lat/Long/Elev to the Console. */
function | (){
//generate the output content
type=document.getElementById('latlngType').value
var content='';
for (var i=0; i<saved_address_array.length; i++) {
if (type=='Latitude'){
content=content+latitude[saved_address_array[i]]+'<br>';
}else if (type=='Longitude'){
content=content+longitude[saved_address_array[i]]+'<br>';
}else if (type=='Elevation'){
content=content+elevation[saved_address_array[i]]+'<br>';
}
}
//write page
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This is the AJAX function which produces the list of terms below each input
box. It takes as input:
1) the ontology select box id
2) the query string
3) the input box id
4) the txt box below input id
*/
function showResult(ont_id,str,inputbox_id,txt_id)
{
// If the substring length is empty, then do nothing
if (str.length==0)
{
return;
}
// If the substring is at least one in length, then search for similar terms
// in the ontologies selected. This is where we can set the length to start
//searches (i.e. after 3 letters are present).
else if (str.length>0){
//remove text or checkmark next to the input box and change font color
//to black
document.getElementById('valid'+inputbox_id).innerHTML="";
document.getElementById('valid'+inputbox_id).style.color="black";
xmlhttp=GetXmlHttpObject()
//check if browser can perform xmlhttp
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
//get the list of ontologies using the ontology id
ont_list=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ont_list)
//generate a url string where we pass our variables
var url="ontology_search.psp";
url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4){
//write the list of similar terms from the database
document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText;
document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2";
xmlhttp.close();
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
}
}
/*
This is the AJAX function which validates the terms in each input
box. It takes as input:
1) the ontology select box id
2) the initial list of ontology terms from user
3) the table where all results should be written
4) whether this is the first call of this function
5) whether this is an export call
*/
function validateInput(ont_id,ont_term_list,table_id,new_data,export_data)
{
//if no data is input produce an alert
if (ont_term_list.length==0){
alert("Paste some data in the input box!");
return;
}
//get the list of ontologies using the ontology id
ontologies=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ontologies)
//if no ontology is selected produce an alert
if (selected_ont==''){
alert("Select at least one Ontology!")
return;
}
//take the pasted terms from user and convert those terms to an array
ont_term_array=convert_terms_to_array(ont_term_list);
//save this original list of terms from the user into an array
original_ont_term_array=ont_term_array;
//create an array to store the terms from the input boxes as they are being
//modified
updated_unique_terms=new Array();
//if this is the first call to this function, create a unique list of terms
//build the input boxes
if (new_data == 'True')
{
//original_ont_term_array=new Array();
original_unique_terms=new Array();
//remove old input boxes, so the user can re-use the app over and over
clear_inputs(table_id)
//generate unique list and input boxes
unique_ont_array=write_input_boxes(ont_term_array,table_id);
//store unique ontology terms for later use
original_unique_terms=unique_ont_array;
updated_unique_terms=unique_ont_array;
}
//If this is not the first call, retrieve values from input boxes
else
{
//get the values from the input boxes
unique_ont_array=get_inputs(unique_ont_array);
updated_unique_terms=unique_ont_array;
}
//check if browser can perform xmlhttp
xmlhttp=GetXmlHttpObject()
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
/*
var url="ontology_validate.psp";
url=url+"?ont_id="+selected_ont+"&ont_terms="+unique_ont_array;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4)
{
//since the response from the PL/SQL is a string using the '#'
//delimiter, we need to split it and write the results to the table
validity=xmlhttp.responseText.split('#')
for (var i=0; i<validity.length;i++){
//determine if an input value is valid and write 'Invalid' or a
//checkbox accordingly
if (validity[i]=='Valid' || validity[i]=='Valid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML='✓';
document.getElementById('validtxtbox'+(i)).style.color="green";
}else if (validity[i]=='Invalid' || validity[i]=='Invalid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML=validity[i];
document.getElementById('validtxtbox'+(i)).style.color="red";
}
}
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
*/
//If the data is supposed to be exported, write the data to the new window
if (export_data=='True'){
write_data_to_new_window(original_ont_term_array,original_unique_terms,updated_unique_terms);
}
}
function GetXmlHttpObject()
{
if (window.XMLHttpRequest)
{
// code for IE7+, Firefox, Chrome, Opera, Safari
return new XMLHttpRequest();
}
if (window.ActiveXObject)
{
// code for IE6, IE5
return new ActiveXObject("Microsoft.XMLHTTP");
}
return null;
}
/*
This function changes the input box value when the user clicks on a term
in the list of terms
*/
function change_form_value(form_field,form_value,inputbox_id){
//change the input box value
document.getElementById(form_field).value=form_value;
//Clear the list of ontology terms
document.getElementById('input'+inputbox_id).innerHTML='';
document.getElementById('input'+inputbox_id).style.border="0px";
//Add a checkmark next to the input box
document.getElementById('valid'+form_field).innerHTML='✓';
document.getElementById('valid'+form_field).style.color="green";
}
/*
when iterating through list of ontology terms, upon onfocus, this changes
the background to cyan
*/
function setStyle(x)
{
document.getElementById(x).style.background="cyan"
}
/*
when iterating through list of ontology terms, when removing focus (onblur),
this changes the background back to white
*/
function removeStyle(x)
{
document.getElementById(x).style.background="white"
}
/*
This function checks to see if all input boxes are valid, updates the original
list of terms from the user, with the corrected terms, then calls the
function to write the data to the new window
*/
function write_data_to_new_window(original_ont_term_array, original_unique_terms, updated_unique_terms){
//Determine that all terms are valid
for (var i=0;i<original_unique_terms.length;i++){
if (original_unique_terms[i]!=''){
validity=document.getElementById('validtxtbox'+(i)).innerHTML
if ( validity=='' || validity=='Click Input Box...'){
alert('You need to choose valid terms!');
return;
}else if (validity=='Invalid' || validity=='Invalid\n'){
alert('You have invalid terms!');
return;
}
}
}
//generate a new array with update terms based on the valid input boxes
output_array=new Array();
//using length-1 since we appended an empty element to the list in the
//convert_terms_to_array function.
for (var j=0;j<original_ont_term_array.length-1;j++){
for (var k=0;k<original_unique_terms.length;k++){
if (original_ont_term_array[j]==original_unique_terms[k]){
output_array.push(updated_unique_terms[k]);
}
}
if(original_ont_term_array[j]=='' && j!=0){
output_array.push(output_array[j-1]);
}else if(original_ont_term_array[j]=='' && j==0){
output_array.push('n/a');
}
}
//write the array to the new window
writeConsole(output_array.join('<br>'));
}
/*
This function creates a new console window and writes an html page containing
the corrected list of terms
*/
function writeConsole(content)
{
//open new window
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
//write page
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This function gets the terms from the input boxes and puts them into an array
*/
function get_inputs(unique_ont_array){
new_unique_ont_array= new Array();
for (var i = 0; i<unique_ont_array.length;i++){
if (unique_ont_array[i]!=''){
new_unique_ont_array[i]=document.getElementById('txtbox'+i).value;
}
}
return new_unique_ont_array;
}
/*
This function gets a list of selected ontologies, concatenates them and formats
them as a string to be used by PL/SQL
*/
function get_selected(selObject){
var arSelected = new Array();
for (i=0;i<selObject.options.length;i++){
if (selObject.options[i].selected==true){
arSelected.push('\''+selObject.options[i].value+'\'');
};
}
onts=arSelected.join(',');
return onts
}
/*
This function takes an array and produces another array with only unique values
*/
function unique(a){
for( var i=a.length; --i>-1; ) {
for( var j=a.length; --j>-1; )
{
//must convert to uppercase for comparison, so there are no
//case-sensitivity issues
if(i != j && a[i].toUpperCase() == a[j].toUpperCase() && a[j]!='') a.splice(i,1);
}
}
//filter out the empty strings
var unique_terms=new Array();
for (var k=0;k<a.length;k++){
if (a[k] != ''){
unique_terms.push(a[k]);
}
}
return unique_terms;
}
/*
This function clears the result table, so the user can perform many searches
*/
function clear_inputs(table_id){
for(var i = document.getElementById(table_id).rows.length; i > 0;i--)
{
document.getElementById(table_id).deleteRow(i -1);
}
}
/*
This function checks that text was typed in the input textarea and then takes
that list and converts the string into an array based on new-line character
*/
function convert_terms_to_array(ont_term_list){
ont_term_array=ont_term_list.split('\n')
//need to add another element to array, since there may not be a new line
//at the end of the list
ont_term_array.push('');
if (ont_term_array=='')
{
alert("Input a list of terms!")
return;
}
filtered_array=new Array();
for (var i=0;i<ont_term_array.length;i++){
filtered_array.push(ont_term_array[i])
/* if (ont_term_array[i]!=''){
filtered_array.push(ont_term_array[i])
}*/
}
return filtered_array;
}
/*
This function writes the input boxes into the results table. Each input box has
4 fields that are created as follows:
================== ======================
input-box-validity input-box
------------------ ----------------------
empty field list-of-ontology-terms
================== ======================
...
*/
function write_input_boxes(ont_term_array,table_id){
//create the header fields in the table (2 columns)
unique_ont_array=unique(ont_term_array);
var otable = document.getElementById(table_id);
var row1 = document.createElement("TR");
var th1 = document.createElement("TH");
th1.appendChild(document.createTextNode("Valid:"));
row1.appendChild(th1);
var th2 = document.createElement("TH");
th2.appendChild(document.createTextNode("Distinct Terms from Input List:"));
row1.appendChild(th2);
otable.appendChild(row1);
for (var i = 0; i<unique_ont_array.length;i++){
//make sure the array field is not empty, which happens when performing
//javascript split() and join() functions
if (unique_ont_array[i]!=''){
//create the input-box-validity field
var row2 = document.createElement("TR");
var td1 = document.createElement("TD");
td1.id='validtxtbox'+i;
td1.style.width='130px';
td1.appendChild(document.createTextNode('Click Input Box...'));
row2.appendChild(td1);
//create the input-box field
var td2 = document.createElement("TD");
var input1 = document.createElement("input");
input1.value=unique_ont_array[i];
input1.id='txtbox'+i;
input1.setAttribute('size','50');
input1.setAttribute('onkeyup',"showResult('ontologies',this.value,this.id,this.id)");
input1.setAttribute('onclick',"showResult('ontologies',this.value,this.id,this.id)");
td2.appendChild(input1);
row2.appendChild(td2);
otable.appendChild(row2);
//create the empty field
var row3 = document.createElement("TR");
var td3 = document.createElement("TD");
row3.appendChild(td3);
//create the list-of-ontology-terms field
var td4 = document.createElement("TD");
td4.id='input'+input1.id;
row3.appendChild(td4);
otable.appendChild(row3);
}
}
return unique_ont_array;
}
//this function resets the default select option and is for the select box
//above the ontology select field
function reset_select(selObject){
for (i=0;i<selObject.options.length;i++){
selObject.options[i].selected=false;
}
}
//this function selects all from the select options and is for the select box
//above the ontology select field
function select_all(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=true;
}
}
//this function selects none from the select options and is for the select box
//above the ontology select field
function select_none(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=false;
}
}
//this function inverts the selection from the select options and is for the
//select box above the ontology select field
function select_invert(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
if (listbox_values.options[i].selected==true){
listbox_values.options[i].selected=false;
}else{
listbox_values.options[i].selected=true;
}
}
}
| output_latlong | identifier_name |
ontology.js | /*
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2010, Qiime Web Analysis"
__credits__ = ["Jesse Stombaugh", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.0.0.dev"
__maintainer__ = ["Jesse Stombaugh"]
__email__ = "[email protected]"
__status__ = "Production"
*/
var xmlhttp
var geocoder;
var map;
var marker;
var latitude=new Array();
var longitude=new Array();
var elevation=new Array();
var markersArray = [];
var infoWindowArray = [];
/*This changes the color of the table background when a user mouses over the Tool buttons. */
function mouseover(key){
cell=document.getElementById(key);
if (cell.bgcolor=='black'){
cell.bgcolor='blue';
}else{
cell.bgcolor='black';
}
}
/* These two function turn on/off the visibility of the two tools */
function displayOntology(){
document.getElementById("ontology_lookup").style.display='';
document.getElementById("geographic_location").style.display='none';
document.getElementById("map_canvas").style.visibility='hidden';
}
function displayGeography(){
document.getElementById("ontology_lookup").style.display='none';
document.getElementById("geographic_location").style.display='';
document.getElementById("map_canvas").style.visibility='visible';
}
/* Initializes the Google Map. */
function initialize(){
geocoder = new google.maps.Geocoder();
var latlng = new google.maps.LatLng(0,0);
var myOptions = {
zoom: 1,
center: latlng,
mapTypeId: google.maps.MapTypeId.ROADMAP
}
map = new google.maps.Map(document.getElementById("map_canvas"), myOptions);
}
/* Removes the overlays from the map, but keeps them in the array*
//not used, since we are re-initializing the map
function clearOverlays() {
if (markersArray) {
for (i in markersArray) {
markersArray[i].setMap(null)
//infoWindowArray[i].close()
}
markersArray.length = 0;
//infoWindowArray.length = 0;
//markersArray=new Array();
}
}
*/
var elevator = new google.maps.ElevationService();
/* This function preps the addresses and calls the geocoder */
function codeAddress() {
//we reinitialize each time this is called, so it recenters on the world
//I did this since it is difficult to zoom based on the lat/lngs
initialize();
//get the locations from the input box
var address = document.getElementById("address").value;
//convert the input box into an array
address_array=convert_terms_to_array(address)
//iterate over the addresses and append the "loc:" tag to the beginning
//which overwrites google point of interest detector
saved_address_array=new Array();
for (var i=0; i<address_array.length-1; i++){
if (address_array[i] != ''){
address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
saved_address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
}else if (address_array[i] == '' && address_array[i-1]!=''){
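// a blank input line inherits the previous address so the output rows stay aligned with the input rows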
address_array[i]=address_array[i-1]
saved_address_array[i]=address_array[i-1]
}else{
saved_address_array[i]=address_array[i]
}
}
//get a unique list of the address
unique_addresses=unique(address_array)
//no longer needed since we are re-initializing
//clearOverlays();
latitude=new Array();
longitude=new Array();
elevation=new Array();
var latlong
var iterator=0;
timer_ms=0;
if (geocoder) {
//give status updates
document.getElementById("loading_status").innerHTML='Loading coordinates'
//iterate over the addresses and append a timing event, since google
//has a query limit per second
for (var i=0; i<unique_addresses.length; i++){
if (unique_addresses[i]!=''){
var lat2=setTimeout('geocode_results('+i+')',timer_ms)
timer_ms+=700
}
}
//append to the status after all points should have loaded
setTimeout("document.getElementById('loading_status').innerHTML='Completed'",timer_ms)
}
}
/* This function gets the Lat/Long using Google Maps Geocoder API. */
function geocode_results(i){
//query google maps for lat/lngs
geocoder.geocode( { 'address': unique_addresses[i]}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
var latlong = new google.maps.LatLng(results[0].geometry.location.lat(),results[0].geometry.location.lng());
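// MarkerImage arguments: icon URL, displayed size, sprite origin, and anchor point (the tip of the pin)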
var pinImage = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_letter&chld= |" + 'FF0000',
new google.maps.Size(21, 34),
new google.maps.Point(0,0),
new google.maps.Point(10, 34));
var pinShadow = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_shadow",
new google.maps.Size(40, 37),
new google.maps.Point(0, 0),
new google.maps.Point(12, 35));
/* This function gets the Elevation using Google Maps Elevations API. */
elevator.getElevationForLocations({'locations':[latlong]}, function(results2, status2){
if (status2 == google.maps.ElevationStatus.OK) {
// Retrieve the first result
if (results2[0]) {
//assign lat/lng/elev to arrays
latitude[unique_addresses[i]]=results2[0].location.lat()
longitude[unique_addresses[i]]=results2[0].location.lng()
elevation[unique_addresses[i]]=results2[0].elevation;
//put a pointer on the map
markersArray[unique_addresses[i]] = new google.maps.Marker({
position: latlong,
map: map,
color: '#FF0000',
clickable: false,
icon: pinImage,
shadow: pinShadow
});
} else {
alert("No elevation results found!");
}
}
});
}else{
alert(status)
alert("Unable to find the Location you specified!");
}
})
}
/* This function outputs the Lat/Long/Elev to the Console. */
function output_latlong(){
//generate the output content
type=document.getElementById('latlngType').value
var content='';
for (var i=0; i<saved_address_array.length; i++) {
if (type=='Latitude'){
content=content+latitude[saved_address_array[i]]+'<br>';
}else if (type=='Longitude'){
content=content+longitude[saved_address_array[i]]+'<br>';
}else if (type=='Elevation'){
content=content+elevation[saved_address_array[i]]+'<br>';
}
}
//write page
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This is the AJAX function which produces the list of terms below each input
box. It takes as input:
1) the ontology select box id
2) the query string
3) the input box id
4) the txt box below input id
*/
function showResult(ont_id,str,inputbox_id,txt_id)
{
// If the substring length is empty, then do nothing
if (str.length==0)
{
return;
}
// If the substring is at least one in length, then search for similar terms
// in the ontologies selected. This is where we can set the length to start
//searches (i.e. after 3 letters are present).
else if (str.length>0){
//remove text or checkmark next to the input box and change font color
//to black
document.getElementById('valid'+inputbox_id).innerHTML="";
document.getElementById('valid'+inputbox_id).style.color="black";
xmlhttp=GetXmlHttpObject()
//check if browser can perform xmlhttp
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
//get the list of ontologies using the ontology id
ont_list=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ont_list)
//generate a url string where we pass our variables
var url="ontology_search.psp";
url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4){
//write the list of similar terms from the database
document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText;
document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2";
xmlhttp.close();
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
}
}
/*
This is the AJAX function which validates the terms in each input
box. It takes as input:
1) the ontology select box id
2) the initial list of ontology terms from user
3) the table where all results should be written
4) whether this is the first call of this function
5) whether this is an export call
*/
function validateInput(ont_id,ont_term_list,table_id,new_data,export_data)
{
//if no data is input produce an alert
if (ont_term_list.length==0){
alert("Paste some data in the input box!");
return;
}
//get the list of ontologies using the ontology id
ontologies=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ontologies)
//if no ontology is selected produce an alert
if (selected_ont==''){
alert("Select at least one Ontology!")
return;
}
//take the pasted terms from user and convert those terms to an array
ont_term_array=convert_terms_to_array(ont_term_list);
//save this original list of terms from the user into an array
original_ont_term_array=ont_term_array;
//create an array to store the terms from the input boxes as they are being
//modified
updated_unique_terms=new Array();
//if this is the first call to this function, create a unique list of terms
//build the input boxes
if (new_data == 'True')
{
//original_ont_term_array=new Array();
original_unique_terms=new Array();
//remove old input boxes, so the user can re-use the app over and over
clear_inputs(table_id)
//generate unique list and input boxes
unique_ont_array=write_input_boxes(ont_term_array,table_id);
//store unique ontology terms for later use
original_unique_terms=unique_ont_array;
updated_unique_terms=unique_ont_array;
}
//If this is not the first call, retrieve values from input boxes
else
{
//get the values from the input boxes
unique_ont_array=get_inputs(unique_ont_array);
updated_unique_terms=unique_ont_array;
}
//check if browser can perform xmlhttp
xmlhttp=GetXmlHttpObject()
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
/*
var url="ontology_validate.psp";
url=url+"?ont_id="+selected_ont+"&ont_terms="+unique_ont_array;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4)
{
//since the response from the PL/SQL is a string using the '#'
//delimiter, we need to split it and write the results to the table
validity=xmlhttp.responseText.split('#')
for (var i=0; i<validity.length;i++){
//determine if an input value is valid and write 'Invalid' or a
//checkbox accordingly
if (validity[i]=='Valid' || validity[i]=='Valid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML='✓';
document.getElementById('validtxtbox'+(i)).style.color="green";
}else if (validity[i]=='Invalid' || validity[i]=='Invalid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML=validity[i];
document.getElementById('validtxtbox'+(i)).style.color="red";
}
}
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
*/
//If the data is supposed to be exported, write the data to the new window
if (export_data=='True'){
write_data_to_new_window(original_ont_term_array,original_unique_terms,updated_unique_terms);
}
}
function GetXmlHttpObject()
{
if (window.XMLHttpRequest)
{
// code for IE7+, Firefox, Chrome, Opera, Safari
return new XMLHttpRequest();
}
if (window.ActiveXObject)
{
// code for IE6, IE5
return new ActiveXObject("Microsoft.XMLHTTP");
}
return null;
}
/*
This function changes the input box value when the user clicks on a term
in the list of terms
*/
function change_form_value(form_field,form_value,inputbox_id){
//change the input box value
document.getElementById(form_field).value=form_value;
//Clear the list of ontology terms
document.getElementById('input'+inputbox_id).innerHTML='';
document.getElementById('input'+inputbox_id).style.border="0px";
//Add a checkmark next to the input box
document.getElementById('valid'+form_field).innerHTML='✓';
document.getElementById('valid'+form_field).style.color="green";
}
/*
when iterating through list of ontology terms, upon onfocus, this changes
the background to cyan
*/
function setStyle(x)
{
document.getElementById(x).style.background="cyan"
}
/*
when iterating through list of ontology terms, when removing focus (onblur),
this changes the background back to white
*/
function removeStyle(x)
{
document.getElementById(x).style.background="white"
}
/*
This function checks to see if all input boxes are valid, updates the original
list of terms from the user, with the corrected terms, then calls the
function to write the data to the new window
*/
function write_data_to_new_window(original_ont_term_array, original_unique_terms, updated_unique_terms){
//Determine that all terms are valid
for (var i=0;i<original_unique_terms.length;i++){
if (original_unique_terms[i]!=''){
validity=document.getElementById('validtxtbox'+(i)).innerHTML
if ( validity=='' || validity=='Click Input Box...'){
alert('You need to choose valid terms!');
return;
}else if (validity=='Invalid' || validity=='Invalid\n'){
alert('You have invalid terms!');
return;
}
}
}
//generate a new array with update terms based on the valid input boxes
output_array=new Array();
//using length-1 since we appended an empty element to the list in the
//convert_terms_to_array function.
for (var j=0;j<original_ont_term_array.length-1;j++){
for (var k=0;k<original_unique_terms.length;k++){ | output_array.push(updated_unique_terms[k]);
}
}
if(original_ont_term_array[j]=='' && j!=0){
output_array.push(output_array[j-1]);
}else if(original_ont_term_array[j]=='' && j==0){
output_array.push('n/a');
}
}
//write the array to the new window
writeConsole(output_array.join('<br>'));
}
/*
This function creates a new console window and writes an html page containing
the corrected list of terms
*/
function writeConsole(content)
{
//open new window
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
//write page
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This function gets the terms from the input boxes and puts them into an array
*/
function get_inputs(unique_ont_array){
new_unique_ont_array= new Array();
for (var i = 0; i<unique_ont_array.length;i++){
if (unique_ont_array[i]!=''){
new_unique_ont_array[i]=document.getElementById('txtbox'+i).value;
}
}
return new_unique_ont_array;
}
/*
This function gets a list of selected ontologies, concatenates them and formats
them as a string to be used by PL/SQL
*/
function get_selected(selObject){
var arSelected = new Array();
for (i=0;i<selObject.options.length;i++){
if (selObject.options[i].selected==true){
arSelected.push('\''+selObject.options[i].value+'\'');
};
}
onts=arSelected.join(',');
return onts
}
/*
This function takes an array and produces another array with only unique values
*/
function unique(a){
for( var i=a.length; --i>-1; ) {
for( var j=a.length; --j>-1; )
{
//must convert to uppercase for comparison, so there are no
//case-sensitivity issues
if(i != j && a[i].toUpperCase() == a[j].toUpperCase() && a[j]!='') a.splice(i,1);
}
}
//filter out the empty strings
var unique_terms=new Array();
for (var k=0;k<a.length;k++){
if (a[k] != ''){
unique_terms.push(a[k]);
}
}
return unique_terms;
}
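//Example (illustrative): unique(["Soil","soil","","water"]) drops the
//case-insensitive duplicate and the empty string, returning ["Soil","water"].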
/*
This function clears the result table, so the user can perform many searches
*/
function clear_inputs(table_id){
for(var i = document.getElementById(table_id).rows.length; i > 0;i--)
{
document.getElementById(table_id).deleteRow(i -1);
}
}
/*
This function checks that text was typed in the input textarea and then takes
that list and converts the string into an array based on new-line character
*/
function convert_terms_to_array(ont_term_list){
ont_term_array=ont_term_list.split('\n')
//need to add another element to array, since there may not be a new line
//at the end of the list
ont_term_array.push('');
if (ont_term_array=='')
{
alert("Input a list of terms!")
return;
}
filtered_array=new Array();
for (var i=0;i<ont_term_array.length;i++){
filtered_array.push(ont_term_array[i])
/* if (ont_term_array[i]!=''){
filtered_array.push(ont_term_array[i])
}*/
}
return filtered_array;
}
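//Example (illustrative): convert_terms_to_array("soil\nwater") returns
//["soil","water",""]; the trailing empty element marks the end of the pasted list.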
/*
This function writes the input boxes into the results table. Each input box has
4 fields that are created as follows:
================== ======================
input-box-validity input-box
------------------ ----------------------
empty field list-of-ontology-terms
================== ======================
...
*/
function write_input_boxes(ont_term_array,table_id){
//create the header fields in the table (2 columns)
unique_ont_array=unique(ont_term_array);
var otable = document.getElementById(table_id);
var row1 = document.createElement("TR");
var th1 = document.createElement("TH");
th1.appendChild(document.createTextNode("Valid:"));
row1.appendChild(th1);
var th2 = document.createElement("TH");
th2.appendChild(document.createTextNode("Distinct Terms from Input List:"));
row1.appendChild(th2);
otable.appendChild(row1);
for (var i = 0; i<unique_ont_array.length;i++){
//make sure the array field is not empty, which happens when performing
//javascript split() and join() functions
if (unique_ont_array[i]!=''){
//create the input-box-validity field
var row2 = document.createElement("TR");
var td1 = document.createElement("TD");
td1.id='validtxtbox'+i;
td1.style.width='130px';
td1.appendChild(document.createTextNode('Click Input Box...'));
row2.appendChild(td1);
//create the input-box field
var td2 = document.createElement("TD");
var input1 = document.createElement("input");
input1.value=unique_ont_array[i];
input1.id='txtbox'+i;
input1.setAttribute('size','50');
input1.setAttribute('onkeyup',"showResult('ontologies',this.value,this.id,this.id)");
input1.setAttribute('onclick',"showResult('ontologies',this.value,this.id,this.id)");
td2.appendChild(input1);
row2.appendChild(td2);
otable.appendChild(row2);
//create the empty field
var row3 = document.createElement("TR");
var td3 = document.createElement("TD");
row3.appendChild(td3);
//create the list-of-ontology-terms field
var td4 = document.createElement("TD");
td4.id='input'+input1.id;
row3.appendChild(td4);
otable.appendChild(row3);
}
}
return unique_ont_array;
}
//this function resets the default select option and is for the select box
//above the ontology select field
function reset_select(selObject){
for (i=0;i<selObject.options.length;i++){
selObject.options[i].selected=false;
}
}
//this function selects all from the select options and is for the select box
//above the ontology select field
function select_all(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=true;
}
}
//this function selects none from the select options and is for the select box
//above the ontology select field
function select_none(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=false;
}
}
//this function inverts the selection from the select options and is for the
//select box above the ontology select field
function select_invert(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
if (listbox_values.options[i].selected==true){
listbox_values.options[i].selected=false;
}else{
listbox_values.options[i].selected=true;
}
}
} | if (original_ont_term_array[j]==original_unique_terms[k]){ | random_line_split |
ontology.js | /*
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2010, Qiime Web Analysis"
__credits__ = ["Jesse Stombaugh", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.0.0.dev"
__maintainer__ = ["Jesse Stombaugh"]
__email__ = "[email protected]"
__status__ = "Production"
*/
var xmlhttp
var geocoder;
var map;
var marker;
var latitude=new Array();
var longitude=new Array();
var elevation=new Array();
var markersArray = [];
var infoWindowArray = [];
/*This changes the color of the table background when a user mouses over the Tool buttons. */
function mouseover(key){
cell=document.getElementById(key);
if (cell.bgcolor=='black'){
cell.bgcolor='blue';
}else{
cell.bgcolor='black';
}
}
/* These two function turn on/off the visibility of the two tools */
function displayOntology(){
document.getElementById("ontology_lookup").style.display='';
document.getElementById("geographic_location").style.display='none';
document.getElementById("map_canvas").style.visibility='hidden';
}
function displayGeography(){
document.getElementById("ontology_lookup").style.display='none';
document.getElementById("geographic_location").style.display='';
document.getElementById("map_canvas").style.visibility='visible';
}
/* Initializes the Google Map. */
function initialize(){
geocoder = new google.maps.Geocoder();
var latlng = new google.maps.LatLng(0,0);
var myOptions = {
zoom: 1,
center: latlng,
mapTypeId: google.maps.MapTypeId.ROADMAP
}
map = new google.maps.Map(document.getElementById("map_canvas"), myOptions);
}
/* Removes the overlays from the map, but keeps them in the array*
//not used, since we are re-initializing the map
function clearOverlays() {
if (markersArray) {
for (i in markersArray) {
markersArray[i].setMap(null)
//infoWindowArray[i].close()
}
markersArray.length = 0;
//infoWindowArray.length = 0;
//markersArray=new Array();
}
}
*/
var elevator = new google.maps.ElevationService();
/* This function preps the addresses and calls the geocoder */
function codeAddress() {
//we reinitialize each time this is called, so it recenters on the world
//I did this since it is difficult to zoom based on the lat/lngs
initialize();
//get the locations from the input box
var address = document.getElementById("address").value;
//convert the input box into an array
address_array=convert_terms_to_array(address)
//iterate over the addresses and append the "loc:" tag to the beginning
//which overwrites google point of interest detector
saved_address_array=new Array();
for (var i=0; i<address_array.length-1; i++){
if (address_array[i] != ''){
address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
saved_address_array[i]='loc:'+address_array[i].replace(/^loc:/i, '')
}else if (address_array[i] == '' && address_array[i-1]!=''){
address_array[i]=address_array[i-1]
saved_address_array[i]=address_array[i-1]
}else{
saved_address_array[i]=address_array[i]
}
}
//get a unique list of the address
unique_addresses=unique(address_array)
//no longer needed since we are re-initializing
//clearOverlays();
latitude=new Array();
longitude=new Array();
elevation=new Array();
var latlong
var iterator=0;
timer_ms=0;
if (geocoder) {
//give status updates
document.getElementById("loading_status").innerHTML='Loading coordinates'
//iterate over the addresses and append a timing event, since google
//has a query limit per second
for (var i=0; i<unique_addresses.length; i++){
if (unique_addresses[i]!=''){
var lat2=setTimeout('geocode_results('+i+')',timer_ms)
timer_ms+=700
}
}
//append to the status after all points should have loaded
setTimeout("document.getElementById('loading_status').innerHTML='Completed'",timer_ms)
}
}
/* This function gets the Lat/Long using Google Maps Geocoder API. */
function geocode_results(i){
//query google maps for lat/lngs
geocoder.geocode( { 'address': unique_addresses[i]}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
var latlong = new google.maps.LatLng(results[0].geometry.location.lat(),results[0].geometry.location.lng());
var pinImage = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_letter&chld= |" + 'FF0000',
new google.maps.Size(21, 34),
new google.maps.Point(0,0),
new google.maps.Point(10, 34));
var pinShadow = new google.maps.MarkerImage("http://chart.apis.google.com/chart?chst=d_map_pin_shadow",
new google.maps.Size(40, 37),
new google.maps.Point(0, 0),
new google.maps.Point(12, 35));
/* This function gets the Elevation using Google Maps Elevations API. */
elevator.getElevationForLocations({'locations':[latlong]}, function(results2, status2){
if (status == google.maps.ElevationStatus.OK) {
// Retrieve the first result
if (results2[0]) {
//assign lat/lng/elev to arrays
latitude[unique_addresses[i]]=results2[0].location.lat()
longitude[unique_addresses[i]]=results2[0].location.lng()
elevation[unique_addresses[i]]=results2[0].elevation;
//put a pointer on the map
markersArray[unique_addresses[i]] = new google.maps.Marker({
position: latlong,
map: map,
color: '#FF0000',
clickable: false,
icon: pinImage,
shadow: pinShadow
});
} else {
alert("No elevation results found!");
}
}
});
}else{
alert(status)
alert("Unable to find the Location you specified!");
}
})
}
/* This function outputs the Lat/Long/Elev to the Console. */
function output_latlong(){
//generate the output content
type=document.getElementById('latlngType').value
var content='';
for (var i=0; i<saved_address_array.length; i++) {
if (type=='Latitude'){
content=content+latitude[saved_address_array[i]]+'<br>';
}else if (type=='Longitude'){
content=content+longitude[saved_address_array[i]]+'<br>';
}else if (type=='Elevation'){
content=content+elevation[saved_address_array[i]]+'<br>';
}
}
//write page
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This is the AJAX function which produces the list of terms below each input
box. It takes as input:
1) the ontology select box id
2) the query string
3) the input box id
4) the txt box below input id
*/
function showResult(ont_id,str,inputbox_id,txt_id)
{
// If the substring length is empty, then do nothing
if (str.length==0)
{
return;
}
// If the substring is at least one in length, then search for similar terms
// in the ontologies selected. This is where we could require a minimum length
// before searching (e.g. only start after 3 letters have been typed).
else if (str.length>0){
//remove text or checkmark next to the input box and change font color
//to black
document.getElementById('valid'+inputbox_id).innerHTML="";
document.getElementById('valid'+inputbox_id).style.color="black";
xmlhttp=GetXmlHttpObject()
//check if browser can perform xmlhttp
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
//get the list of ontologies using the ontology id
ont_list=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ont_list)
//generate a url string where we pass our variables
var url="ontology_search.psp";
url=url+"?ont="+selected_ont+"&q="+str+"&inputid="+inputbox_id+"&txt_id="+txt_id;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4){
//write the list of similar terms from the database
document.getElementById('input'+inputbox_id).innerHTML=xmlhttp.responseText;
document.getElementById('input'+inputbox_id).style.border="1px solid #A5ACB2";
xmlhttp.close();
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
}
}
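//Example (illustrative) of the request this builds, assuming the ontologies
//'FMA' and 'ENVO' are selected and the user typed "soil" into input box 'txtbox0':
// ontology_search.psp?ont='FMA','ENVO'&q=soil&inputid=txtbox0&txt_id=txtbox0&sid=<random>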
/*
This is the AJAX function which validates the terms in each input
box. It takes as input:
1) the ontology select box id
2) the initial list of ontology terms from user
3) the table where all results should be written
4) whether this is the first call of this function
5) whether this is an export call
*/
function validateInput(ont_id,ont_term_list,table_id,new_data,export_data)
{
//if no data is input produce an alert
if (ont_term_list.length==0){
alert("Paste some data in the input box!");
return;
}
//get the list of ontologies using the ontology id
ontologies=document.getElementById(ont_id)
//get only the selected ontologies and convert to PL/SQL formatted text
selected_ont=get_selected(ontologies)
//if no ontology is selected produce an alert
if (selected_ont==''){
alert("Select at least one Ontology!")
return;
}
//take the pasted terms from user and convert those terms to an array
ont_term_array=convert_terms_to_array(ont_term_list);
//save this original list of terms from the user into an array
original_ont_term_array=ont_term_array;
//create an array to store the terms from the input boxes as they are being
//modified
updated_unique_terms=new Array();
//if this is the first call to this function, create a unique list of terms
//build the input boxes
if (new_data == 'True')
{
//original_ont_term_array=new Array();
original_unique_terms=new Array();
//remove old input boxes, so the user can re-use the app over and over
clear_inputs(table_id)
//generate unique list and input boxes
unique_ont_array=write_input_boxes(ont_term_array,table_id);
//store unique ontology terms for later use
original_unique_terms=unique_ont_array;
updated_unique_terms=unique_ont_array;
}
//If this is not the first call, retrieve values from input boxes
else
{
//get the values from the input boxes
unique_ont_array=get_inputs(unique_ont_array);
updated_unique_terms=unique_ont_array;
}
//check if browser can perform xmlhttp
xmlhttp=GetXmlHttpObject()
if (xmlhttp==null){
alert ("Your browser does not support XML HTTP Request");
return;
}
/*
var url="ontology_validate.psp";
url=url+"?ont_id="+selected_ont+"&ont_terms="+unique_ont_array;
url=url+"&sid="+Math.random();
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4)
{
//since the response from the PL/SQL is a string delimited by '#',
//we need to split it and write the pieces to the table
validity=xmlhttp.responseText.split('#')
for (var i=0; i<validity.length;i++){
//determine if an input value is valid and write 'Invalid' or a
//checkmark accordingly
if (validity[i]=='Valid' || validity[i]=='Valid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML='✓';
document.getElementById('validtxtbox'+(i)).style.color="green";
}else if (validity[i]=='Invalid' || validity[i]=='Invalid\n'){
document.getElementById('validtxtbox'+(i)).innerHTML=validity[i];
document.getElementById('validtxtbox'+(i)).style.color="red";
}
}
}
}
//perform a GET
xmlhttp.open("GET",url,true);
xmlhttp.send(null)
*/
//If the data is supposed to be exported, write the data to the new window
if (export_data=='True'){
write_data_to_new_window(original_ont_term_array,original_unique_terms,updated_unique_terms);
}
}
function GetXmlHttpObject()
{
if (window.XMLHttpRequest)
{
// code for IE7+, Firefox, Chrome, Opera, Safari
return new XMLHttpRequest();
}
if (window.ActiveXObject)
{
// code for IE6, IE5
return new ActiveXObject("Microsoft.XMLHTTP");
}
return null;
}
/*
This function changes the input box value when the user clicks on a term
in the list of terms
*/
function change_form_value(form_field,form_value,inputbox_id){
//change the input box value
document.getElementById(form_field).value=form_value;
//Clear the list of ontology terms
document.getElementById('input'+inputbox_id).innerHTML='';
document.getElementById('input'+inputbox_id).style.border="0px";
//Add a checkmark next to the input box
document.getElementById('valid'+form_field).innerHTML='✓';
document.getElementById('valid'+form_field).style.color="green";
}
/*
when iterating through list of ontology terms, upon onfocus, this changes
the background to cyan
*/
function setStyle(x)
{
document.getElementById(x).style.background="cyan"
}
/*
when iterating through the list of ontology terms, removing focus (onblur)
changes the background back to white
*/
function removeStyle(x)
{
document.getElementById(x).style.background="white"
}
/*
This function checks to see if all input boxes are valid, updates the original
list of terms from the user, with the corrected terms, then calls the
function to write the data to the new window
*/
function write_data_to_new_window(original_ont_term_array, original_unique_terms, updated_unique_terms){
//Determine that all terms are valid
for (var i=0;i<original_unique_terms.length;i++){
if (original_unique_terms[i]!=''){
validity=document.getElementById('validtxtbox'+(i)).innerHTML
if ( validity=='' || validity=='Click Input Box...') | else if (validity=='Invalid' || validity=='Invalid\n'){
alert('You have invalid terms!');
return;
}
}
}
//generate a new array with update terms based on the valid input boxes
output_array=new Array();
//using length-1 since we appended an empty element to the list in the
//convert_terms_to_array function.
for (var j=0;j<original_ont_term_array.length-1;j++){
for (var k=0;k<original_unique_terms.length;k++){
if (original_ont_term_array[j]==original_unique_terms[k]){
output_array.push(updated_unique_terms[k]);
}
}
if(original_ont_term_array[j]=='' && j!=0){
output_array.push(output_array[j-1]);
}else if(original_ont_term_array[j]=='' && j==0){
output_array.push('n/a');
}
}
//write the array to the new window
writeConsole(output_array.join('<br>'));
}
/*
This function creates a new console window and writes an html page containing
the corrected list of terms
*/
function writeConsole(content)
{
//open new window
top.consoleRef=window.open('','myconsole','width=350,height=400,menubar=0,toolbar=1,status=0,scrollbars=1,resizable=1')
//write page
top.consoleRef.document.writeln('<html><head><title>Console</title></head><body bgcolor=white onLoad="self.focus()">'+content+'</body></html>')
top.consoleRef.document.close()
}
/*
This function gets the terms from the input boxes and puts them into an array
*/
function get_inputs(unique_ont_array){
new_unique_ont_array= new Array();
for (var i = 0; i<unique_ont_array.length;i++){
if (unique_ont_array[i]!=''){
new_unique_ont_array[i]=document.getElementById('txtbox'+i).value;
}
}
return new_unique_ont_array;
}
/*
This function gets a list of selected ontologies, concatenates them and formats
them as a string to be used by PL/SQL
*/
function get_selected(selObject){
var arSelected = new Array();
for (i=0;i<selObject.options.length;i++){
if (selObject.options[i].selected==true){
arSelected.push('\''+selObject.options[i].value+'\'');
};
}
onts=arSelected.join(',');
return onts
}
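//Example (illustrative): if the options with values FMA and ENVO are selected,
//get_selected returns the string 'FMA','ENVO' (single-quoted, comma-separated).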
/*
This function takes an array and produces another array with only unique values
*/
function unique(a){
for( var i=a.length; --i>-1; ) {
for( var j=a.length; --j>-1; )
{
//must convert to uppercase for comparison, so there are no
//case-sensitivity issues
if(i != j && a[i].toUpperCase() == a[j].toUpperCase() && a[j]!='') a.splice(i,1);
}
}
//filter out the empty strings
var unique_terms=new Array();
for (var k=0;k<a.length;k++){
if (a[k] != ''){
unique_terms.push(a[k]);
}
}
return unique_terms;
}
/*
This function clears the result table, so the user can perform many searches
*/
function clear_inputs(table_id){
for(var i = document.getElementById(table_id).rows.length; i > 0;i--)
{
document.getElementById(table_id).deleteRow(i -1);
}
}
/*
This function checks that text was typed in the input textarea and then takes
that list and converts the string into an array based on new-line character
*/
function convert_terms_to_array(ont_term_list){
ont_term_array=ont_term_list.split('\n')
//need to add another element to array, since there may not be a new line
//at the end of the list
ont_term_array.push('');
if (ont_term_array=='')
{
alert("Input a list of terms!")
return;
}
filtered_array=new Array();
for (var i=0;i<ont_term_array.length;i++){
filtered_array.push(ont_term_array[i])
/* if (ont_term_array[i]!=''){
filtered_array.push(ont_term_array[i])
}*/
}
return filtered_array;
}
/*
This function writes the input boxes into the results table. Each input box has
4 fields that are created as follows:
================== ======================
input-box-validity input-box
------------------ ----------------------
empty field list-of-ontology-terms
================== ======================
...
*/
function write_input_boxes(ont_term_array,table_id){
//create the header fields in the table (2 columns)
unique_ont_array=unique(ont_term_array);
var otable = document.getElementById(table_id);
var row1 = document.createElement("TR");
var th1 = document.createElement("TH");
th1.appendChild(document.createTextNode("Valid:"));
row1.appendChild(th1);
var th2 = document.createElement("TH");
th2.appendChild(document.createTextNode("Distinct Terms from Input List:"));
row1.appendChild(th2);
otable.appendChild(row1);
for (var i = 0; i<unique_ont_array.length;i++){
//make sure the array field is not empty, which happens when performing
//javascript split() and join() functions
if (unique_ont_array[i]!=''){
//create the input-box-validity field
var row2 = document.createElement("TR");
var td1 = document.createElement("TD");
td1.id='validtxtbox'+i;
td1.style.width='130px';
td1.appendChild(document.createTextNode('Click Input Box...'));
row2.appendChild(td1);
//create the input-box field
var td2 = document.createElement("TD");
var input1 = document.createElement("input");
input1.value=unique_ont_array[i];
input1.id='txtbox'+i;
input1.setAttribute('size','50');
input1.setAttribute('onkeyup',"showResult('ontologies',this.value,this.id,this.id)");
input1.setAttribute('onclick',"showResult('ontologies',this.value,this.id,this.id)");
td2.appendChild(input1);
row2.appendChild(td2);
otable.appendChild(row2);
//create the empty field
var row3 = document.createElement("TR");
var td3 = document.createElement("TD");
row3.appendChild(td3);
//create the list-of-ontology-terms field
var td4 = document.createElement("TD");
td4.id='input'+input1.id;
row3.appendChild(td4);
otable.appendChild(row3);
}
}
return unique_ont_array;
}
//this function resets the default select option and is for the select box
//above the ontology select field
function reset_select(selObject){
for (i=0;i<selObject.options.length;i++){
selObject.options[i].selected=false;
}
}
//this function selects all from the select options and is for the select box
//above the ontology select field
function select_all(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=true;
}
}
//this function selects none from the select options and is for the select box
//above the ontology select field
function select_none(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
listbox_values.options[i].selected=false;
}
}
//this function inverts the selection from the select options and is for the
//select box above the ontology select field
function select_invert(listbox_id){
var listbox_values=document.getElementById(listbox_id);
for (i=0;i<listbox_values.options.length;i++){
if (listbox_values.options[i].selected==true){
listbox_values.options[i].selected=false;
}else{
listbox_values.options[i].selected=true;
}
}
}
| {
alert('You need to choose valid terms!');
return;
} | conditional_block |
render.js | import * as THREE from "three";
import Stats from "stats.js";
import * as d3 from "d3";
import seedrandom from "seedrandom";
import * as dat from "exdat";
import noise from "fast-simplex-noise";
import Terrain from "../lib/terrain.js";
var style = document.createElement("style");
style.type = "text/css";
// TODO remove overflow hidden from body
style.appendChild(document.createTextNode("body {margin: 0; overflow:hidden}\n canvas {width:100%; height: 100%;}"));
document.head.appendChild(style);
function walk(cells, edges, cell, distance) {
for (var i = 0; i < cell.faces.length; i++) {
cell.faces[i].color.setRGB(1, 0, 0);
}
for (var i = 0; i < cell.halfedges.length; i++) {
var edge = edges[cell.halfedges[i]];
var thisSite = cell.site;
var neighboorSite = edge.left == cell.site ? edge.right : edge.left;
if (neighboorSite) {
var neighboorCell = cells[neighboorSite.index];
var neighboorDistance = new THREE.Vector2(thisSite[0] - neighboorSite[0], thisSite[1] - neighboorSite[1]).length();
if (neighboorDistance <= distance) {
walk(cells, edges, neighboorCell, distance - neighboorDistance);
}
}
}
}
window.addEventListener("keyup", function (e) {
var char = String.fromCharCode(e.keyCode);
if (char == 'R') {
newVoronoi();
} else if (char == 'T') {
var x = Math.random(), y = Math.random();
var r = 0.05;
if (map) {
var site = map.voronoi.find(x, y);
if (site) {
var cell = map.voronoi.cells[site.index];
walk(map.voronoi.cells, map.voronoi.edges, cell, r);
mesh.map.geometry.colorsNeedUpdate = true;
}
}
}
});
var lastMouse;
var cameraTarget = new THREE.Vector3(0, 0, 0);
window.addEventListener("mousemove", function (e) {
e.preventDefault();
if ( e.buttons == 4 ) {
var diff = cameraTarget.clone().sub(camera.position);
diff.y = 0;
var radious = diff.length();
if (!lastMouse) {
lastMouse = { x:e.clientX, y:e.clientY, phi: camera.position.angleTo(new THREE.Vector3(0,0,1)), theta: Math.asin(camera.position.x / radious) };
}
var theta = lastMouse.theta + ( ( e.clientY - lastMouse.y ) * Math.PI * 0.01 );
var phi = lastMouse.phi + ( ( e.clientX - lastMouse.x ) * 0.5 );
camera.position.x = radious * Math.sin( theta );
camera.position.z = radious * Math.cos( theta );
camera.lookAt(cameraTarget);
camera.updateMatrix();
} else {
lastMouse = null;
}
});
window.addEventListener("wheel", function (e) {
var direction = cameraTarget.clone().sub(camera.position);
var len = direction.length();
direction.normalize();
camera.position.add(direction.multiplyScalar(e.deltaY * -0.01 * Math.min(1, Math.pow(len, 5))));
});
//var rand = seedrandom("andre", { global: true });
var container = document.body;
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 0.1, 1000 );
camera.position.set( 0, 2, -2 );
camera.lookAt(cameraTarget);
scene.add( camera );
var light = new THREE.PointLight( 0xffffff, 0.8 );
camera.add( light );
var grid = new THREE.GridHelper(2, 20);
scene.add(grid);
var renderer = new THREE.WebGLRenderer( { antialias: true } );
renderer.setClearColor( 0xf0f0f0 );
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
var stats = new Stats();
container.appendChild( stats.dom );
var RENDER = {
biome: 0,
height: 1,
humidity: 2,
height_X_humidity: 3
}
function Settings() {
this.seed = 0;
this.sites = 4000;
this.showPolygons = false;
this.showSites = false;
this.flat = false;
this.seaLevel = 0.45;
this.heightFrequency = 1.2;
this.heightScale = 0.3;
this.render = "biome";
}
var gui = new dat.GUI();
var settings = new Settings();
gui.add(settings, "sites", 0).step(50).onFinishChange(newVoronoi);
gui.add(settings, "heightScale", 0, 1).step(0.01);
gui.add(settings, "heightFrequency").step(0.01).onFinishChange(newVoronoi);
gui.add(settings, "showPolygons").onFinishChange(function(v) { if (mesh) { mesh.border.visible = v; } });
gui.add(settings, "showSites").onFinishChange(function(v) { if (mesh) { mesh.sites.visible = v; } });;
gui.add(settings, "flat").onFinishChange(makeFlat);
gui.add(settings, "seaLevel", 0, 1).step(0.01).onFinishChange(newVoronoi);
gui.add(settings, "render", Object.keys(RENDER)).onFinishChange(changeRender);
function changeRender() |
function makeFlat(f) {
function mf(g) {
if (!g.old) {
g.old = g.vertices.map(function(v,i) {
return v.y;
});
}
g.vertices.map(function (v, i) {
if (f) { v.y = 0; } else { v.y = g.old[i]; }
});
g.dynamic = g.verticesNeedUpdate = true;
}
if (mesh) {
mf(mesh.map.geometry);
mf(mesh.sites.geometry);
mf(mesh.border.geometry);
if (!f) {
mesh.map.material = mesh.map.originalMaterial;
} else {
mesh.map.material = new THREE.MeshBasicMaterial( { side: THREE.FrontSide, vertexColors: THREE.FaceColors } );
}
}
}
var width = 1;
var height = 1;
function createVoronoi(nsites) {
var width = 1;
var height = 1;
var voronoi = d3.voronoi()
.extent([[0, 0], [width, height]]);
function generateSites() {
return d3.range(nsites)
.map(function(d) { return [Math.random() * width, Math.random() * height]; });
}
function calculateCentroid(pts) {
var x = 0;
var y = 0;
for (var i = 0; i < pts.length; i++) {
x += pts[i][0];
y += pts[i][1];
}
return [x/pts.length, y/pts.length];
}
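// Lloyd relaxation: replace each site with the centroid of its Voronoi cell so
// the sites spread out more evenly; the diagram is rebuilt on each iteration.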
function loydIteration(sites, iterations) {
iterations = iterations || 1;
for (var i = 0; i < iterations; i++) {
sites = voronoi(sites)
.polygons()
.map(calculateCentroid);
}
return sites;
}
var sites = loydIteration(generateSites(), 1);
var diag = voronoi(sites);
diag.sites = sites;
return diag;
}
var mesh;
var map;
function calculateColor(h, x, y) {
var color;
var seaHeight = settings.heightScale * settings.seaLevel;
var nh = h / (settings.heightScale - seaHeight);
var p = Math.max(nh, 0.5) / 0xff;
var humidity = (noiseGen.raw2D(10+x,10+y) + 1) / 2;
var biome = {
water: 0x1a3560,
scorched: 0x999999,
bare: 0xbbbbbb,
tundra: 0xddddbb,
snow: 0xf8f8f8,
taiga: 0xccd4bb,
shrubland: 0xc4ccbb,
temperateDesert: 0xe4e8ca,
temperateRainFlorest: 0xa4c4a8,
temperateDeciduousFlorest: 0xb4c9a9,
grassland: 0xc4d4aa,
tropicalRainForest: 0x9cbba9,
tropicalSeasonalForest: 0xa9cca4,
subtropicalDesert: 0xe9ddc7
};
// first dimension is height, second is humidity
var colorMap = [
// very dry, dry damp wet very wet drenched
[ biome.subtropicalDesert, biome.grassland, biome.tropicalSeasonalForest, biome.tropicalRainForest, biome.tropicalRainForest, biome.tropicalRainForest], // height level 1
[ biome.temperateDesert, biome.grassland, biome.grassland, biome.temperateDeciduousFlorest, biome.temperateDeciduousFlorest, biome.temperateRainFlorest], // height level 2
[ biome.temperateDesert, biome.temperateDesert, biome.shrubland, biome.shrubland, biome.taiga, biome.snow], // height level 3
[ biome.scorched, biome.bare, biome.tundra, biome.snow, biome.snow, biome.snow] // height level 4
];
var heb = nh;
var hub = humidity;
var hei = heb < 1/4 ? 0 : heb < 2/4 ? 1 : heb < 3/4 ? 2 : 3;
var hui = hub < 1/6 ? 0 : hub < 2/6 ? 1 : hub < 3/6 ? 2 : hub < 4/6 ? 3 : hub < 5/6 ? 4 : 5;
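// Illustrative lookup: normalized height 0.6 and humidity 0.7 fall into height
// band 2 and humidity band 4, so the biome render mode picks colorMap[2][4] (taiga).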
switch (RENDER[settings.render]) {
case RENDER.biome:
if (nh > 0) {
color = new THREE.Color(colorMap[hei][hui]);
} else {
// water
color = new THREE.Color(biome.water);
}
break;
case RENDER.height:
color = new THREE.Color(nh,nh,nh);
break;
case RENDER.humidity:
color = new THREE.Color(hub,hub,hub);
break;
case RENDER.height_X_humidity:
color = new THREE.Color(hub*nh,hub*nh,hub*nh);
break;
}
return color;
}
var noiseGen = null;
function newVoronoi() {
var t0 = performance.now();
if (mesh) {
scene.remove(mesh.group);
}
var t = new Terrain();
var voronoiDiagram = createVoronoi(settings.sites);
noiseGen = new noise({
frequency: settings.heightFrequency,
max: settings.heightScale,
min: 0,
octaves: 8,
persistence: 0.5
});
t.build(voronoiDiagram, {
calculateHeight: function(x,y) {
var h = noiseGen.scaled2D(x, y);
var l = 0.2;
var seaHeight = settings.heightScale * settings.seaLevel;
var p = Math.min(1, x / l, y / l, Math.min(1 - x, l) / l, Math.min(1 - y, l) / l);
return Math.max( (h * (Math.pow(p, 0.5) || 0 )) - seaHeight, 0 );
},
calculateColor: calculateColor
});
map = t;
mesh = t.createMesh({
mapMaterial: new THREE.MeshPhongMaterial({
vertexColors: THREE.VertexColors,
shininess: 5,
shading: THREE.SmoothShading
})
});
mesh.map.originalMaterial = mesh.map.material;
mesh.border.visible = settings.showPolygons;
mesh.sites.visible = settings.showSites;
scene.add(mesh.group);
mesh.group.position.set(-0.5, 0, -0.5);
var t1 = performance.now();
console.log("Voronoi generation took " + (t1 - t0) + " ms.");
makeFlat(settings.flat);
}
newVoronoi();
function render() {
stats.begin();
renderer.render( scene, camera );
stats.end();
requestAnimationFrame( render );
}
render();
| {
if (!map) {
return;
}
var cells = map.voronoi.cells;
for (var i = 0; i < cells.length; i++) {
var cell = cells[i];
var faces = cell.faces;
for (var j = 0; j < faces.length; j++) {
var f = faces[j];
var c = f.color;
c.setHex(calculateColor(cell.height, cell.site.data[0], cell.site.data[1]).getHex());
}
}
mesh.map.geometry.dynamic = true;
mesh.map.geometry.colorsNeedUpdate = true;
} | identifier_body |
render.js | import * as THREE from "three";
import Stats from "stats.js";
import * as d3 from "d3";
import seedrandom from "seedrandom";
import * as dat from "exdat";
import noise from "fast-simplex-noise";
import Terrain from "../lib/terrain.js";
var style = document.createElement("style");
style.type = "text/css";
// TODO remove overflow hidden from body
style.appendChild(document.createTextNode("body {margin: 0; overflow:hidden}\n canvas {width:100%; height: 100%;}"));
document.head.appendChild(style);
function walk(cells, edges, cell, distance) {
for (var i = 0; i < cell.faces.length; i++) {
cell.faces[i].color.setRGB(1, 0, 0);
}
for (var i = 0; i < cell.halfedges.length; i++) {
var edge = edges[cell.halfedges[i]];
var thisSite = cell.site;
var neighboorSite = edge.left == cell.site ? edge.right : edge.left;
if (neighboorSite) {
var neighboorCell = cells[neighboorSite.index];
var neighboorDistance = new THREE.Vector2(thisSite[0] - neighboorSite[0], thisSite[1] - neighboorSite[1]).length();
if (neighboorDistance <= distance) {
walk(cells, edges, neighboorCell, distance - neighboorDistance);
}
}
}
}
window.addEventListener("keyup", function (e) {
var char = String.fromCharCode(e.keyCode);
if (char == 'R') {
newVoronoi();
} else if (char == 'T') {
var x = Math.random(), y = Math.random();
var r = 0.05;
if (map) {
var site = map.voronoi.find(x, y);
if (site) {
var cell = map.voronoi.cells[site.index];
walk(map.voronoi.cells, map.voronoi.edges, cell, r);
mesh.map.geometry.colorsNeedUpdate = true;
}
}
}
});
var lastMouse;
var cameraTarget = new THREE.Vector3(0, 0, 0);
window.addEventListener("mousemove", function (e) {
e.preventDefault();
if ( e.buttons == 4 ) {
var diff = cameraTarget.clone().sub(camera.position);
diff.y = 0;
var radious = diff.length();
if (!lastMouse) {
lastMouse = { x:e.clientX, y:e.clientY, phi: camera.position.angleTo(new THREE.Vector3(0,0,1)), theta: Math.asin(camera.position.x / radious) };
}
var theta = lastMouse.theta + ( ( e.clientY - lastMouse.y ) * Math.PI * 0.01 );
var phi = lastMouse.phi + ( ( e.clientX - lastMouse.x ) * 0.5 );
camera.position.x = radious * Math.sin( theta );
camera.position.z = radious * Math.cos( theta );
camera.lookAt(cameraTarget);
camera.updateMatrix();
} else {
lastMouse = null;
}
});
window.addEventListener("wheel", function (e) {
var direction = cameraTarget.clone().sub(camera.position);
var len = direction.length();
direction.normalize();
camera.position.add(direction.multiplyScalar(e.deltaY * -0.01 * Math.min(1, Math.pow(len, 5))));
});
//var rand = seedrandom("andre", { global: true });
var container = document.body;
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 0.1, 1000 );
camera.position.set( 0, 2, -2 );
camera.lookAt(cameraTarget);
scene.add( camera );
var light = new THREE.PointLight( 0xffffff, 0.8 );
camera.add( light );
var grid = new THREE.GridHelper(2, 20);
scene.add(grid);
var renderer = new THREE.WebGLRenderer( { antialias: true } );
renderer.setClearColor( 0xf0f0f0 );
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
var stats = new Stats();
container.appendChild( stats.dom );
var RENDER = {
biome: 0,
height: 1,
humidity: 2,
height_X_humidity: 3
}
function Settings() {
this.seed = 0;
this.sites = 4000;
this.showPolygons = false;
this.showSites = false; | this.seaLevel = 0.45;
this.heightFrequency = 1.2;
this.heightScale = 0.3;
this.render = "biome";
}
var gui = new dat.GUI();
var settings = new Settings();
gui.add(settings, "sites", 0).step(50).onFinishChange(newVoronoi);
gui.add(settings, "heightScale", 0, 1).step(0.01);
gui.add(settings, "heightFrequency").step(0.01).onFinishChange(newVoronoi);
gui.add(settings, "showPolygons").onFinishChange(function(v) { if (mesh) { mesh.border.visible = v; } });
gui.add(settings, "showSites").onFinishChange(function(v) { if (mesh) { mesh.sites.visible = v; } });;
gui.add(settings, "flat").onFinishChange(makeFlat);
gui.add(settings, "seaLevel", 0, 1).step(0.01).onFinishChange(newVoronoi);
gui.add(settings, "render", Object.keys(RENDER)).onFinishChange(changeRender);
function changeRender() {
if (!map) {
return;
}
var cells = map.voronoi.cells;
for (var i = 0; i < cells.length; i++) {
var cell = cells[i];
var faces = cell.faces;
for (var j = 0; j < faces.length; j++) {
var f = faces[j];
var c = f.color;
c.setHex(calculateColor(cell.height, cell.site.data[0], cell.site.data[1]).getHex());
}
}
mesh.map.geometry.dynamic = true;
mesh.map.geometry.colorsNeedUpdate = true;
}
function makeFlat(f) {
function mf(g) {
if (!g.old) {
g.old = g.vertices.map(function(v,i) {
return v.y;
});
}
g.vertices.map(function (v, i) {
if (f) { v.y = 0; } else { v.y = g.old[i]; }
});
g.dynamic = g.verticesNeedUpdate = true;
}
if (mesh) {
mf(mesh.map.geometry);
mf(mesh.sites.geometry);
mf(mesh.border.geometry);
if (!f) {
mesh.map.material = mesh.map.originalMaterial;
} else {
mesh.map.material = new THREE.MeshBasicMaterial( { side: THREE.FrontSide, vertexColors: THREE.FaceColors } );
}
}
}
var width = 1;
var height = 1;
function createVoronoi(nsites) {
var width = 1;
var height = 1;
var voronoi = d3.voronoi()
.extent([[0, 0], [width, height]]);
function generateSites() {
return d3.range(nsites)
.map(function(d) { return [Math.random() * width, Math.random() * height]; });
}
function calculateCentroid(pts) {
var x = 0;
var y = 0;
for (var i = 0; i < pts.length; i++) {
x += pts[i][0];
y += pts[i][1];
}
return [x/pts.length, y/pts.length];
}
function loydIteration(sites, iterations) {
iterations = iterations || 1;
for (var i = 0; i < iterations; i++) {
sites = voronoi(sites)
.polygons()
.map(calculateCentroid);
}
return sites;
}
var sites = loydIteration(generateSites(), 1);
var diag = voronoi(sites);
diag.sites = sites;
return diag;
}
var mesh;
var map;
function calculateColor(h, x, y) {
var color;
var seaHeight = settings.heightScale * settings.seaLevel;
var nh = h / (settings.heightScale - seaHeight);
var p = Math.max(nh, 0.5) / 0xff;
var humidity = (noiseGen.raw2D(10+x,10+y) + 1) / 2;
var biome = {
water: 0x1a3560,
scorched: 0x999999,
bare: 0xbbbbbb,
tundra: 0xddddbb,
snow: 0xf8f8f8,
taiga: 0xccd4bb,
shrubland: 0xc4ccbb,
temperateDesert: 0xe4e8ca,
temperateRainFlorest: 0xa4c4a8,
temperateDeciduousFlorest: 0xb4c9a9,
grassland: 0xc4d4aa,
tropicalRainForest: 0x9cbba9,
tropicalSeasonalForest: 0xa9cca4,
subtropicalDesert: 0xe9ddc7
};
// first dimension is height, second is humidity
var colorMap = [
// very dry, dry damp wet very wet drenched
[ biome.subtropicalDesert, biome.grassland, biome.tropicalSeasonalForest, biome.tropicalRainForest, biome.tropicalRainForest, biome.tropicalRainForest], // height level 1
[ biome.temperateDesert, biome.grassland, biome.grassland, biome.temperateDeciduousFlorest, biome.temperateDeciduousFlorest, biome.temperateRainFlorest], // height level 2
[ biome.temperateDesert, biome.temperateDesert, biome.shrubland, biome.shrubland, biome.taiga, biome.snow], // height level 3
[ biome.scorched, biome.bare, biome.tundra, biome.snow, biome.snow, biome.snow] // height level 4
];
var heb = nh;
var hub = humidity;
var hei = heb < 1/4 ? 0 : heb < 2/4 ? 1 : heb < 3/4 ? 2 : 3;
var hui = hub < 1/6 ? 0 : hub < 2/6 ? 1 : hub < 3/6 ? 2 : hub < 4/6 ? 3 : hub < 5/6 ? 4 : 5;
switch (RENDER[settings.render]) {
case RENDER.biome:
if (nh > 0) {
color = new THREE.Color(colorMap[hei][hui]);
} else {
// water
color = new THREE.Color(biome.water);
}
break;
case RENDER.height:
color = new THREE.Color(nh,nh,nh);
break;
case RENDER.humidity:
color = new THREE.Color(hub,hub,hub);
break;
case RENDER.height_X_humidity:
color = new THREE.Color(hub*nh,hub*nh,hub*nh);
break;
}
return color;
}
var noiseGen = null;
function newVoronoi() {
var t0 = performance.now();
if (mesh) {
scene.remove(mesh.group);
}
var t = new Terrain();
var voronoiDiagram = createVoronoi(settings.sites);
noiseGen = new noise({
frequency: settings.heightFrequency,
max: settings.heightScale,
min: 0,
octaves: 8,
persistence: 0.5
});
t.build(voronoiDiagram, {
calculateHeight: function(x,y) {
var h = noiseGen.scaled2D(x, y);
var l = 0.2;
var seaHeight = settings.heightScale * settings.seaLevel;
var p = Math.min(1, x / l, y / l, Math.min(1 - x, l) / l, Math.min(1 - y, l) / l);
return Math.max( (h * (Math.pow(p, 0.5) || 0 )) - seaHeight, 0 );
},
calculateColor: calculateColor
});
map = t;
mesh = t.createMesh({
mapMaterial: new THREE.MeshPhongMaterial({
vertexColors: THREE.VertexColors,
shininess: 5,
shading: THREE.SmoothShading
})
});
mesh.map.originalMaterial = mesh.map.material;
mesh.border.visible = settings.showPolygons;
mesh.sites.visible = settings.showSites;
scene.add(mesh.group);
mesh.group.position.set(-0.5, 0, -0.5);
var t1 = performance.now();
console.log("Voronoi generation took " + (t1 - t0) + " ms.");
makeFlat(settings.flat);
}
newVoronoi();
function render() {
stats.begin();
renderer.render( scene, camera );
stats.end();
requestAnimationFrame( render );
}
render(); | this.flat = false; | random_line_split |
render.js | import * as THREE from "three";
import Stats from "stats.js";
import * as d3 from "d3";
import seedrandom from "seedrandom";
import * as dat from "exdat";
import noise from "fast-simplex-noise";
import Terrain from "../lib/terrain.js";
var style = document.createElement("style");
style.type = "text/css";
// TODO remove overflow hidden from body
style.appendChild(document.createTextNode("body {margin: 0; overflow:hidden}\n canvas {width:100%; height: 100%;}"));
document.head.appendChild(style);
function walk(cells, edges, cell, distance) {
for (var i = 0; i < cell.faces.length; i++) {
cell.faces[i].color.setRGB(1, 0, 0);
}
for (var i = 0; i < cell.halfedges.length; i++) {
var edge = edges[cell.halfedges[i]];
var thisSite = cell.site;
var neighboorSite = edge.left == cell.site ? edge.right : edge.left;
if (neighboorSite) {
var neighboorCell = cells[neighboorSite.index];
var neighboorDistance = new THREE.Vector2(thisSite[0] - neighboorSite[0], thisSite[1] - neighboorSite[1]).length();
if (neighboorDistance <= distance) {
walk(cells, edges, neighboorCell, distance - neighboorDistance);
}
}
}
}
window.addEventListener("keyup", function (e) {
var char = String.fromCharCode(e.keyCode);
if (char == 'R') {
newVoronoi();
} else if (char == 'T') {
var x = Math.random(), y = Math.random();
var r = 0.05;
if (map) {
var site = map.voronoi.find(x, y);
if (site) {
var cell = map.voronoi.cells[site.index];
walk(map.voronoi.cells, map.voronoi.edges, cell, r);
mesh.map.geometry.colorsNeedUpdate = true;
}
}
}
});
var lastMouse;
var cameraTarget = new THREE.Vector3(0, 0, 0);
window.addEventListener("mousemove", function (e) {
e.preventDefault();
if ( e.buttons == 4 ) {
var diff = cameraTarget.clone().sub(camera.position);
diff.y = 0;
var radious = diff.length();
if (!lastMouse) {
lastMouse = { x:e.clientX, y:e.clientY, phi: camera.position.angleTo(new THREE.Vector3(0,0,1)), theta: Math.asin(camera.position.x / radious) };
}
var theta = lastMouse.theta + ( ( e.clientY - lastMouse.y ) * Math.PI * 0.01 );
var phi = lastMouse.phi + ( ( e.clientX - lastMouse.x ) * 0.5 );
camera.position.x = radious * Math.sin( theta );
camera.position.z = radious * Math.cos( theta );
camera.lookAt(cameraTarget);
camera.updateMatrix();
} else {
lastMouse = null;
}
});
window.addEventListener("wheel", function (e) {
var direction = cameraTarget.clone().sub(camera.position);
var len = direction.length();
direction.normalize();
camera.position.add(direction.multiplyScalar(e.deltaY * -0.01 * Math.min(1, Math.pow(len, 5))));
});
//var rand = seedrandom("andre", { global: true });
var container = document.body;
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 0.1, 1000 );
camera.position.set( 0, 2, -2 );
camera.lookAt(cameraTarget);
scene.add( camera );
var light = new THREE.PointLight( 0xffffff, 0.8 );
camera.add( light );
var grid = new THREE.GridHelper(2, 20);
scene.add(grid);
var renderer = new THREE.WebGLRenderer( { antialias: true } );
renderer.setClearColor( 0xf0f0f0 );
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
var stats = new Stats();
container.appendChild( stats.dom );
var RENDER = {
biome: 0,
height: 1,
humidity: 2,
height_X_humidity: 3
}
function Settings() {
this.seed = 0;
this.sites = 4000;
this.showPolygons = false;
this.showSites = false;
this.flat = false;
this.seaLevel = 0.45;
this.heightFrequency = 1.2;
this.heightScale = 0.3;
this.render = "biome";
}
var gui = new dat.GUI();
var settings = new Settings();
gui.add(settings, "sites", 0).step(50).onFinishChange(newVoronoi);
gui.add(settings, "heightScale", 0, 1).step(0.01);
gui.add(settings, "heightFrequency").step(0.01).onFinishChange(newVoronoi);
gui.add(settings, "showPolygons").onFinishChange(function(v) { if (mesh) { mesh.border.visible = v; } });
gui.add(settings, "showSites").onFinishChange(function(v) { if (mesh) { mesh.sites.visible = v; } });;
gui.add(settings, "flat").onFinishChange(makeFlat);
gui.add(settings, "seaLevel", 0, 1).step(0.01).onFinishChange(newVoronoi);
gui.add(settings, "render", Object.keys(RENDER)).onFinishChange(changeRender);
function changeRender() {
if (!map) {
return;
}
var cells = map.voronoi.cells;
for (var i = 0; i < cells.length; i++) {
var cell = cells[i];
var faces = cell.faces;
for (var j = 0; j < faces.length; j++) {
var f = faces[j];
var c = f.color;
c.setHex(calculateColor(cell.height, cell.site.data[0], cell.site.data[1]).getHex());
}
}
mesh.map.geometry.dynamic = true;
mesh.map.geometry.colorsNeedUpdate = true;
}
function | (f) {
function mf(g) {
if (!g.old) {
g.old = g.vertices.map(function(v,i) {
return v.y;
});
}
g.vertices.map(function (v, i) {
if (f) { v.y = 0; } else { v.y = g.old[i]; }
});
g.dynamic = g.verticesNeedUpdate = true;
}
if (mesh) {
mf(mesh.map.geometry);
mf(mesh.sites.geometry);
mf(mesh.border.geometry);
if (!f) {
mesh.map.material = mesh.map.originalMaterial;
} else {
mesh.map.material = new THREE.MeshBasicMaterial( { side: THREE.FrontSide, vertexColors: THREE.FaceColors } );
}
}
}
var width = 1;
var height = 1;
function createVoronoi(nsites) {
var width = 1;
var height = 1;
var voronoi = d3.voronoi()
.extent([[0, 0], [width, height]]);
function generateSites() {
return d3.range(nsites)
.map(function(d) { return [Math.random() * width, Math.random() * height]; });
}
function calculateCentroid(pts) {
var x = 0;
var y = 0;
for (var i = 0; i < pts.length; i++) {
x += pts[i][0];
y += pts[i][1];
}
return [x/pts.length, y/pts.length];
}
function loydIteration(sites, iterations) {
iterations = iterations || 1;
for (var i = 0; i < iterations; i++) {
sites = voronoi(sites)
.polygons()
.map(calculateCentroid);
}
return sites;
}
var sites = loydIteration(generateSites(), 1);
var diag = voronoi(sites);
diag.sites = sites;
return diag;
}
var mesh;
var map;
function calculateColor(h, x, y) {
var color;
var seaHeight = settings.heightScale * settings.seaLevel;
var nh = h / (settings.heightScale - seaHeight);
var p = Math.max(nh, 0.5) / 0xff;
var humidity = (noiseGen.raw2D(10+x,10+y) + 1) / 2;
var biome = {
water: 0x1a3560,
scorched: 0x999999,
bare: 0xbbbbbb,
tundra: 0xddddbb,
snow: 0xf8f8f8,
taiga: 0xccd4bb,
shrubland: 0xc4ccbb,
temperateDesert: 0xe4e8ca,
temperateRainFlorest: 0xa4c4a8,
temperateDeciduousFlorest: 0xb4c9a9,
grassland: 0xc4d4aa,
tropicalRainForest: 0x9cbba9,
tropicalSeasonalForest: 0xa9cca4,
subtropicalDesert: 0xe9ddc7
};
// first dimension is height, second is humidity
var colorMap = [
// very dry, dry damp wet very wet drenched
[ biome.subtropicalDesert, biome.grassland, biome.tropicalSeasonalForest, biome.tropicalRainForest, biome.tropicalRainForest, biome.tropicalRainForest], // height level 1
[ biome.temperateDesert, biome.grassland, biome.grassland, biome.temperateDeciduousFlorest, biome.temperateDeciduousFlorest, biome.temperateRainFlorest], // height level 2
[ biome.temperateDesert, biome.temperateDesert, biome.shrubland, biome.shrubland, biome.taiga, biome.snow], // height level 3
[ biome.scorched, biome.bare, biome.tundra, biome.snow, biome.snow, biome.snow] // height level 4
];
var heb = nh;
var hub = humidity;
var hei = heb < 1/4 ? 0 : heb < 2/4 ? 1 : heb < 3/4 ? 2 : 3;
var hui = hub < 1/6 ? 0 : hub < 2/6 ? 1 : hub < 3/6 ? 2 : hub < 4/6 ? 3 : hub < 5/6 ? 4 : 5;
switch (RENDER[settings.render]) {
case RENDER.biome:
if (nh > 0) {
color = new THREE.Color(colorMap[hei][hui]);
} else {
// water
color = new THREE.Color(biome.water);
}
break;
case RENDER.height:
color = new THREE.Color(nh,nh,nh);
break;
case RENDER.humidity:
color = new THREE.Color(hub,hub,hub);
break;
case RENDER.height_X_humidity:
color = new THREE.Color(hub*nh,hub*nh,hub*nh);
break;
}
return color;
}
var noiseGen = null;
function newVoronoi() {
var t0 = performance.now();
if (mesh) {
scene.remove(mesh.group);
}
var t = new Terrain();
var voronoiDiagram = createVoronoi(settings.sites);
noiseGen = new noise({
frequency: settings.heightFrequency,
max: settings.heightScale,
min: 0,
octaves: 8,
persistence: 0.5
});
t.build(voronoiDiagram, {
calculateHeight: function(x,y) {
var h = noiseGen.scaled2D(x, y);
var l = 0.2;
var seaHeight = settings.heightScale * settings.seaLevel;
var p = Math.min(1, x / l, y / l, Math.min(1 - x, l) / l, Math.min(1 - y, l) / l);
return Math.max( (h * (Math.pow(p, 0.5) || 0 )) - seaHeight, 0 );
},
calculateColor: calculateColor
});
map = t;
mesh = t.createMesh({
mapMaterial: new THREE.MeshPhongMaterial({
vertexColors: THREE.VertexColors,
shininess: 5,
shading: THREE.SmoothShading
})
});
mesh.map.originalMaterial = mesh.map.material;
mesh.border.visible = settings.showPolygons;
mesh.sites.visible = settings.showSites;
scene.add(mesh.group);
mesh.group.position.set(-0.5, 0, -0.5);
var t1 = performance.now();
console.log("Voronoi generation took " + (t1 - t0) + " ms.");
makeFlat(settings.flat);
}
newVoronoi();
function render() {
stats.begin();
renderer.render( scene, camera );
stats.end();
requestAnimationFrame( render );
}
render();
| makeFlat | identifier_name |
build_all.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run docker build -> test -> push workflow for all repositories and images.
"""
import hashlib
import json
import logging
import os
import re
import subprocess
from datetime import datetime
import docker
from pynamodb.attributes import UnicodeAttribute
from pynamodb.exceptions import DoesNotExist
from pynamodb.models import Model
def create_logger():
logger = logging.getLogger("ci-runner")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
return logger
# --- Json utility ---
def read_text(path):
with open(path, "rb") as f:
content = f.read().decode("utf-8")
return content
def strip_comment_line_with_symbol(line, start):
"""
Strip comments from line string.
"""
parts = line.split(start)
counts = [len(re.findall(r'(?:^|[^"\\]|(?:\\\\|\\")+)(")', part))
for part in parts]
total = 0
for nr, count in enumerate(counts):
total += count
if total % 2 == 0:
return start.join(parts[:nr + 1]).rstrip()
else: # pragma: no cover
return line.rstrip()
def strip_comments(string, comment_symbols=frozenset(('#', '//'))):
"""
Strip comments from json string.
:param string: A string containing json with comments started by comment_symbols.
:param comment_symbols: Iterable of symbols that start a line comment (default # or //).
:return: The string with the comments removed.
"""
lines = string.splitlines()
for k in range(len(lines)):
for symbol in comment_symbols:
lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol)
return '\n'.join(lines)
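# Example (illustrative): a '#' inside a quoted value is kept, while the real
# trailing comment is stripped:
#
#   strip_comments('{"name": "repo#1"}  # note')  ->  '{"name": "repo#1"}'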
def read_json(file_path):
"""
Read dict data from json file
:type file_path: str
:rtype: dict
"""
return json.loads(strip_comments(read_text(file_path)))
def get_json_value(file_path, json_path):
"""
Read specific field from JSON file.
:type file_path: str
:param file_path: the absolute path for a json file
:type json_path: str
:param json_path: json path notation.
"""
# find absolute path
cwd = os.getcwd()
if not os.path.isabs(file_path):
file_path = os.path.abspath(os.path.join(cwd, file_path))
# fix json_path
if json_path.startswith("$."):
json_path = json_path.replace("$.", "", 1)
with open(file_path, "rb") as f:
data = json.loads(strip_comments(f.read().decode("utf-8")))
value = data
for part in json_path.split("."):
if part in value:
value = value[part]
else:
raise ValueError("'$.{}' not found in {}".format(json_path, file_path))
return value
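# Example (illustrative, hypothetical file): if config.json contains
# {"AWS": {"REGION": "us-east-1"}}, then
#   get_json_value("config.json", "$.AWS.REGION")  ->  "us-east-1"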
def get_dockerfile_md5(dockerfile_path):
"""
Get the md5 checksum of a Dockerfile; comments, empty lines, and trailing
whitespace are ignored.
:param dockerfile_path: the absolute path of the Dockerfile
:rtype: str
"""
valid_lines = list()
with open(dockerfile_path, "rb") as f:
lines = f.read().decode("utf-8").split("\n")
for line in lines:
line = line.rstrip()
# ignore comment line
if line.startswith("#"):
continue
# ignore empty line
if not bool(line):
continue
# trim trailing comment
if "#" in line:
line = line[:-(line[::-1].index("#") + 1)].rstrip()
if line:
valid_lines.append(line)
else:
valid_lines.append(line)
md5 = hashlib.md5()
md5.update("\n".join(valid_lines).encode("utf-8"))
return md5.hexdigest()
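# Example (illustrative, hypothetical paths): two Dockerfiles that differ only in
# comments, blank lines, or trailing "# ..." notes hash to the same digest, so only
# meaningful edits change the recorded checksum:
#
#   get_dockerfile_md5("repos/python/Dockerfile") == get_dockerfile_md5("repos/python/Dockerfile.commented")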
DIR_HERE = os.path.abspath(os.path.dirname(__file__))
DIR_PROJECT_ROOT = DIR_HERE
DIR_CICD = DIR_HERE
DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos")
# config file path
PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"]
PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json")
DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"]
IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
# --- Load Configs ---
# detect runtime
class Runtime:
local = "local"
circleci = "circleci"
if os.environ.get("CIRCLECI"):
runtime = Runtime.circleci
else:
runtime = Runtime.local
# resolve config
if runtime == Runtime.local:
AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION")
AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE")
# set environment variable, allow pynamodb to detect credential
os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE
os.environ["AWS_DEFAULT_REGION"] = AWS_REGION
try:
DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD")
except Exception:
DOCKER_HUB_PASSWORD = ""
GIT_BRANCH = ""
elif runtime == Runtime.circleci:
AWS_REGION = os.environ["AWS_DEFAULT_REGION"]
AWS_PROFILE = None
DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"]
GIT_BRANCH = os.environ["CIRCLE_BRANCH"]
else:
raise NotImplementedError
logger = create_logger()
docker_client = docker.from_env()
# --- Image State DynamoDB backend
class ImageModel(Model):
class Meta:
table_name = "docker-image-state"
region = AWS_REGION
identifier = UnicodeAttribute(hash_key=True)
md5 = UnicodeAttribute()
last_update = UnicodeAttribute()
@property
def last_update_datetime(self):
"""
datetime type of ``last_update``
"""
return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f")
dockerhub_username = DOCKER_HUB_USERNAME # type: str
dir_repo_root = None # type: str
dir_tag_root = None # type: str
is_state_exists = None # type: bool
_repo_name = None # type: str
@property
def repo_name(self):
if self._repo_name is None:
self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip()
return self._repo_name
_tag_name = None # type: str
@property
def tag_name(self):
if self._tag_name is None:
self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip()
return self._tag_name
@property
def dockerfile_path(self):
return os.path.join(self.dir_tag_root, "Dockerfile")
def has_dockerfile(self):
return os.path.exists(self.dockerfile_path)
_dockerfile_md5 = None # type: str
@property
def dockerfile_md5(self):
if self._dockerfile_md5 is None:
self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path)
return self._dockerfile_md5
@property
def local_identifier(self):
return f"{self.repo_name}:{self.tag_name}"
@property
def dockerhub_identifier(self):
return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}"
@property
def awsecr_identifier(self):
raise NotImplementedError
@property
def smoke_test_script_path(self):
return os.path.join(self.dir_tag_root, "smoke-test.sh")
def run_docker_build(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Build docker image in context at {self.dir_tag_root} ...")
try:
run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root])
self.last_update = str(datetime.utcnow())
logger.info(" Build success!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Build failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_smoke_test(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Run smoke test script {self.smoke_test_script_path}...")
try:
run_and_log_command(["bash", self.smoke_test_script_path])
logger.info(" Test passed!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Test failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_docker_push(self, docker_client):
logger.info(f"Push docker image {self.identifier} ...")
if REGISTRY_SERVICE == "dockerhub":
remote_identifier = self.dockerhub_identifier
elif REGISTRY_SERVICE == "awsecr":
remote_identifier = self.awsecr_identifier
else:
raise ValueError
try:
run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier])
docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name)
logger.info(" Success!")
if self.is_state_exists:
self.update(
actions=[
ImageModel.md5.set(self.md5),
ImageModel.last_update.set(self.last_update)
]
)
else:
self.save()
return True
except subprocess.CalledProcessError as e:
logger.info(" Push failed!")
logger.info(" {}".format(e))
return False
except Exception as e:
logger.info(" {}".format(e))
return False
ImageModel.create_table(billing_mode="PAY_PER_REQUEST")
def plan_image_to_build():
"""
:rtype: typing.List[ImageModel]
:return:
"""
logger.info("Scan code repos to schedule docker builds ...")
image_list = list()
for repo_folder in os.listdir(DIR_REPOS):
dir_repo_root = os.path.join(DIR_REPOS, repo_folder)
if not os.path.isdir(dir_repo_root):
continue
for tag_folder in os.listdir(dir_repo_root):
dir_tag_root = os.path.join(dir_repo_root, tag_folder)
image = ImageModel()
image.dir_repo_root = dir_repo_root
image.dir_tag_root = dir_tag_root
if not image.has_dockerfile():
continue
logger.info(f" Detected '{image.local_identifier}' image")
try:
_image = ImageModel.get(image.local_identifier) # type: ImageModel
image.identifier = _image.identifier
image.md5 = _image.md5
image.last_update = _image.last_update
image.is_state_exists = True
if image.md5 == image.dockerfile_md5:
if (datetime.utcnow() - image.last_update_datetime).total_seconds() > IMAGE_REBUILD_INTERVAL:
is_todo = True
logger.info(
" Dockerfile not changed, but the image is outdated "
"according to the IMAGE_REBUILD_INTERVAL setting, we need to rebuild this one")
else:
is_todo = False
logger.info(" Dockerfile not changed and not older than the IMAGE_REBUILD_INTERVAL setting")
logger.info(" skip this image")
else:
is_todo = True
logger.info(" Dockerfile has changed, we need to rebuild the image")
except DoesNotExist:
logger.info(" State does not exist, we need to build this one")
is_todo = True
image.identifier = image.local_identifier
image.md5 = image.dockerfile_md5
image.is_state_exists = False
except Exception as e:
raise e
if is_todo:
|
logger.info("--- build plan summary ---")
if len(image_list):
logger.info("we got these images to build")
for image in image_list:
logger.info(f" {image.local_identifier}")
else:
logger.info("we got NO image to build")
return image_list
def run_and_log_command(commands):
logger.info("Run >>> {}".format(" ".join(commands)))
subprocess.check_output(commands)
def run_build_image(image_list):
"""
Build and test Images.
:type image_list: typing.List[ImageModel]
:param image_list:
:rtype: typing.Tuple[typing.List[ImageModel]]
:return:
"""
success_image_list = list()
failed_image_list = list()
for image in image_list:
docker_build_success_flag = image.run_docker_build()
if not docker_build_success_flag:
failed_image_list.append(image)
continue
smoke_test_success_flag = image.run_smoke_test()
if not smoke_test_success_flag:
failed_image_list.append(image)
continue
success_image_list.append(image)
logger.info("--- docker build summary ---")
logger.info("following images succeeded:")
for image in success_image_list:
logger.info(f" {image.local_identifier}")
logger.info("following images failed:")
for image in failed_image_list:
logger.info(f" {image.local_identifier}")
return success_image_list, failed_image_list
def run_docker_push(image_list, docker_client):
"""
Push built images to registry.
:type image_list: typing.List[ImageModel]
:param image_list:
"""
logger.info("--- push image to registry ---")
if runtime == Runtime.local:
logger.info("Detected local runtime, stop here.")
return
if not len(image_list):
logger.info("No success image to push, stop here.")
return
if GIT_BRANCH != "master":
logger.info("Not master branch, stop here")
return
docker_client.login(username=DOCKER_HUB_USERNAME, password=DOCKER_HUB_PASSWORD)
for image in image_list:
image.run_docker_push(docker_client)
print("Finished.")
if __name__ == "__main__":
todo_image_list = plan_image_to_build()
success_image_list, failed_image_list = run_build_image(todo_image_list)
run_docker_push(success_image_list, docker_client)
| image_list.append(image) | conditional_block |
build_all.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run docker build -> test -> push workflow for all repositories and images.
"""
import hashlib
import json
import logging
import os
import re
import subprocess
from datetime import datetime
import docker
from pynamodb.attributes import UnicodeAttribute
from pynamodb.exceptions import DoesNotExist
from pynamodb.models import Model
def create_logger():
logger = logging.getLogger("ci-runner")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
return logger
# --- Json utility ---
def read_text(path):
with open(path, "rb") as f:
content = f.read().decode("utf-8")
return content
def strip_comment_line_with_symbol(line, start):
"""
Strip comments from line string.
"""
parts = line.split(start)
counts = [len(re.findall(r'(?:^|[^"\\]|(?:\\\\|\\")+)(")', part))
for part in parts]
total = 0
for nr, count in enumerate(counts):
total += count
if total % 2 == 0:
return start.join(parts[:nr + 1]).rstrip()
else: # pragma: no cover
return line.rstrip()
def strip_comments(string, comment_symbols=frozenset(('#', '//'))):
"""
Strip comments from json string.
:param string: A string containing json with comments started by comment_symbols.
:param comment_symbols: Iterable of symbols that start a line comment (default # or //).
:return: The string with the comments removed.
"""
lines = string.splitlines()
for k in range(len(lines)):
for symbol in comment_symbols:
lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol)
return '\n'.join(lines)
def read_json(file_path):
"""
Read dict data from json file
:type file_path: str
:rtype: dict
"""
return json.loads(strip_comments(read_text(file_path)))
def get_json_value(file_path, json_path):
"""
Read specific field from JSON file.
:type file_path: str
:param file_path: the absolute path for a json file
:type json_path: str
:param json_path: json path notation.
"""
# find absolute path
cwd = os.getcwd()
if not os.path.isabs(file_path):
file_path = os.path.abspath(os.path.join(cwd, file_path))
# fix json_path
if json_path.startswith("$."):
json_path = json_path.replace("$.", "", 1)
with open(file_path, "rb") as f:
data = json.loads(strip_comments(f.read().decode("utf-8")))
value = data
for part in json_path.split("."):
if part in value:
value = value[part]
else:
raise ValueError("'$.{}' not found in {}".format(json_path, file_path))
return value
def get_dockerfile_md5(dockerfile_path):
"""
Get the md5 checksum of a Dockerfile; comments, empty lines and trailing
whitespace are ignored.
:param dockerfile_path: the absolute path of the Dockerfile
:rtype: str
"""
valid_lines = list()
with open(dockerfile_path, "rb") as f:
lines = f.read().decode("utf-8").split("\n")
for line in lines:
line = line.rstrip()
# ignore comment line
if line.startswith("#"):
continue
# ignore empty line
if not bool(line):
continue
# trim trailing comment
if "#" in line:
line = line[:-(line[::-1].index("#") + 1)].rstrip()
if line:
valid_lines.append(line)
else:
valid_lines.append(line)
md5 = hashlib.md5()
md5.update("\n".join(valid_lines).encode("utf-8"))
return md5.hexdigest()
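# Illustrative sketch (not part of the original workflow): two Dockerfiles that
# differ only in comments, blank lines and trailing whitespace hash to the same
# value, so cosmetic edits do not trigger a rebuild. Paths below are hypothetical.
def _dockerfile_md5_example(tmp_dir):
    path_a = os.path.join(tmp_dir, "Dockerfile.a")
    path_b = os.path.join(tmp_dir, "Dockerfile.b")
    with open(path_a, "wb") as f:
        f.write(b"FROM python:3.8\nRUN pip install boto3\n")
    with open(path_b, "wb") as f:
        f.write(b"# base image\nFROM python:3.8   \n\nRUN pip install boto3  # deps\n")
    return get_dockerfile_md5(path_a) == get_dockerfile_md5(path_b)  # -> True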
DIR_HERE = os.path.abspath(os.path.dirname(__file__))
DIR_PROJECT_ROOT = DIR_HERE
DIR_CICD = DIR_HERE
DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos")
# config file path
PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"]
PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json")
DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"]
IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
# --- Load Configs ---
# detect runtime
class Runtime:
local = "local"
circleci = "circleci"
if os.environ.get("CIRCLECI"):
runtime = Runtime.circleci
else:
runtime = Runtime.local
# resolve config
if runtime == Runtime.local:
AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION")
AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE")
# set environment variable, allow pynamodb to detect credential
os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE
os.environ["AWS_DEFAULT_REGION"] = AWS_REGION
try:
DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD")
except Exception:
DOCKER_HUB_PASSWORD = ""
GIT_BRANCH = ""
elif runtime == Runtime.circleci:
AWS_REGION = os.environ["AWS_DEFAULT_REGION"]
AWS_PROFILE = None
DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"]
GIT_BRANCH = os.environ["CIRCLE_BRANCH"]
else:
raise NotImplementedError
logger = create_logger()
docker_client = docker.from_env()
# --- Image State DynamoDB backend
class ImageModel(Model):
class Meta:
table_name = "docker-image-state"
region = AWS_REGION
identifier = UnicodeAttribute(hash_key=True)
md5 = UnicodeAttribute()
last_update = UnicodeAttribute()
@property
def last_update_datetime(self):
"""
datetime type of ``last_update``
"""
return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f")
dockerhub_username = DOCKER_HUB_USERNAME # type: str
dir_repo_root = None # type: str
dir_tag_root = None # type: str
is_state_exists = None # type: bool
_repo_name = None # type: str
@property
def repo_name(self):
if self._repo_name is None:
self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip()
return self._repo_name
_tag_name = None # type: str
@property
def tag_name(self):
if self._tag_name is None:
self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip()
return self._tag_name
@property
def dockerfile_path(self):
return os.path.join(self.dir_tag_root, "Dockerfile")
def has_dockerfile(self):
return os.path.exists(self.dockerfile_path)
_dockerfile_md5 = None # type: str
@property
def dockerfile_md5(self):
if self._dockerfile_md5 is None:
self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path)
return self._dockerfile_md5
@property
def local_identifier(self):
return f"{self.repo_name}:{self.tag_name}"
@property
def dockerhub_identifier(self):
return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}"
@property
def awsecr_identifier(self):
raise NotImplementedError
@property
def smoke_test_script_path(self):
return os.path.join(self.dir_tag_root, "smoke-test.sh")
def run_docker_build(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Build docker image in context at {self.dir_tag_root} ...")
try:
run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root])
self.last_update = str(datetime.utcnow())
logger.info(" Build success!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Build failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_smoke_test(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Run smoke test script {self.smoke_test_script_path}...")
try:
run_and_log_command(["bash", self.smoke_test_script_path])
logger.info(" Test passed!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Test failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_docker_push(self, docker_client):
logger.info(f"Push docker image {self.identifier} ...")
if REGISTRY_SERVICE == "dockerhub":
remote_identifier = self.dockerhub_identifier
elif REGISTRY_SERVICE == "awsecr":
remote_identifier = self.awsecr_identifier
else:
raise ValueError
try:
run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier])
docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name)
logger.info(" Success!")
if self.is_state_exists:
self.update(
actions=[
ImageModel.md5.set(self.md5),
ImageModel.last_update.set(self.last_update)
]
)
else:
self.save()
return True
except subprocess.CalledProcessError as e:
logger.info(" Push failed!")
logger.info(" {}".format(e))
return False
except Exception as e:
logger.info(" {}".format(e))
return False
ImageModel.create_table(billing_mode="PAY_PER_REQUEST")
def plan_image_to_build():
"""
:rtype: typing.List[ImageModel]
:return:
"""
logger.info("Scan code repos to schedule docker builds ...")
image_list = list()
for repo_folder in os.listdir(DIR_REPOS):
dir_repo_root = os.path.join(DIR_REPOS, repo_folder)
if not os.path.isdir(dir_repo_root):
continue
for tag_folder in os.listdir(dir_repo_root):
dir_tag_root = os.path.join(dir_repo_root, tag_folder)
image = ImageModel()
image.dir_repo_root = dir_repo_root
image.dir_tag_root = dir_tag_root
if not image.has_dockerfile():
continue
logger.info(f" Detected '{image.local_identifier}' image")
try:
_image = ImageModel.get(image.local_identifier) # type: ImageModel
image.identifier = _image.identifier
image.md5 = _image.md5
image.last_update = _image.last_update
image.is_state_exists = True
if image.md5 == image.dockerfile_md5:
if (datetime.utcnow() - image.last_update_datetime).total_seconds() > IMAGE_REBUILD_INTERVAL:
is_todo = True
logger.info(
" Dockerfile not changed, but the image is outdated "
"according to the IMAGE_REBUILD_INTERVAL setting, we need to rebuild this one")
else:
is_todo = False
logger.info(" Dockerfile not changed and not older than the IMAGE_REBUILD_INTERVAL setting")
logger.info(" skip this image")
else:
is_todo = True
logger.info(" Dockerfile has changed, we need to rebuild the image")
except DoesNotExist:
logger.info(" State does not exist, we need to build this one")
is_todo = True | image.identifier = image.local_identifier
image.md5 = image.dockerfile_md5
image.is_state_exists = False
except Exception as e:
raise e
if is_todo:
image_list.append(image)
logger.info("--- build plan summary ---")
if len(image_list):
logger.info("we got these images to build")
for image in image_list:
logger.info(f" {image.local_identifier}")
else:
logger.info("we got NO image to build")
return image_list
def run_and_log_command(commands):
logger.info("Run >>> {}".format(" ".join(commands)))
subprocess.check_output(commands)
def run_build_image(image_list):
"""
Build and test Images.
:type image_list: typing.List[ImageModel]
:param image_list:
:rtype: typing.Tuple[typing.List[ImageModel]]
:return:
"""
success_image_list = list()
failed_image_list = list()
for image in image_list:
docker_build_success_flag = image.run_docker_build()
if not docker_build_success_flag:
failed_image_list.append(image)
continue
smoke_test_success_flag = image.run_smoke_test()
if not smoke_test_success_flag:
failed_image_list.append(image)
continue
success_image_list.append(image)
logger.info("--- docker build summary ---")
logger.info("following images succeeded:")
for image in success_image_list:
logger.info(f" {image.local_identifier}")
logger.info("following images failed:")
for image in failed_image_list:
logger.info(f" {image.local_identifier}")
return success_image_list, failed_image_list
def run_docker_push(image_list, docker_client):
"""
Push built images to registry.
:type image_list: typing.List[ImageModel]
:param image_list:
"""
logger.info("--- push image to registry ---")
if runtime == Runtime.local:
logger.info("Detected local runtime, stop here.")
return
if not len(image_list):
logger.info("No success image to push, stop here.")
return
if GIT_BRANCH != "master":
logger.info("Not master branch, stop here")
return
docker_client.login(username=DOCKER_HUB_USERNAME, password=DOCKER_HUB_PASSWORD)
for image in image_list:
image.run_docker_push(docker_client)
print("Finished.")
if __name__ == "__main__":
todo_image_list = plan_image_to_build()
success_image_list, failed_image_list = run_build_image(todo_image_list)
run_docker_push(success_image_list, docker_client) | random_line_split |
|
build_all.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run docker build -> test -> push workflow for all repositories and images.
"""
import hashlib
import json
import logging
import os
import re
import subprocess
from datetime import datetime
import docker
from pynamodb.attributes import UnicodeAttribute
from pynamodb.exceptions import DoesNotExist
from pynamodb.models import Model
def create_logger():
logger = logging.getLogger("ci-runner")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
return logger
# --- Json utility ---
def read_text(path):
with open(path, "rb") as f:
content = f.read().decode("utf-8")
return content
def strip_comment_line_with_symbol(line, start):
"""
Strip comments from line string.
"""
parts = line.split(start)
counts = [len(re.findall(r'(?:^|[^"\\]|(?:\\\\|\\")+)(")', part))
for part in parts]
total = 0
for nr, count in enumerate(counts):
total += count
if total % 2 == 0:
return start.join(parts[:nr + 1]).rstrip()
else: # pragma: no cover
return line.rstrip()
def | (string, comment_symbols=frozenset(('#', '//'))):
"""
Strip comments from json string.
:param string: A string containing json with comments started by comment_symbols.
:param comment_symbols: Iterable of symbols that start a line comment (default # or //).
:return: The string with the comments removed.
"""
lines = string.splitlines()
for k in range(len(lines)):
for symbol in comment_symbols:
lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol)
return '\n'.join(lines)
def read_json(file_path):
"""
Read dict data from json file
:type file_path: str
:rtype: dict
"""
return json.loads(strip_comments(read_text(file_path)))
def get_json_value(file_path, json_path):
"""
Read specific field from JSON file.
:type file_path: str
:param file_path: the absolute path for a json file
:type json_path: str
:param json_path: json path notation.
"""
# find absolute path
cwd = os.getcwd()
if not os.path.isabs(file_path):
file_path = os.path.abspath(os.path.join(cwd, file_path))
# fix json_path
if json_path.startswith("$."):
json_path = json_path.replace("$.", "", 1)
with open(file_path, "rb") as f:
data = json.loads(strip_comments(f.read().decode("utf-8")))
value = data
for part in json_path.split("."):
if part in value:
value = value[part]
else:
raise ValueError("'$.{}' not found in {}".format(json_path, file_path))
return value
def get_dockerfile_md5(dockerfile_path):
"""
Get the md5 checksum of a Dockerfile; comments, empty lines and trailing
whitespace are ignored.
:param dockerfile_path: the absolute path of the Dockerfile
:rtype: str
"""
valid_lines = list()
with open(dockerfile_path, "rb") as f:
lines = f.read().decode("utf-8").split("\n")
for line in lines:
line = line.rstrip()
# ignore comment line
if line.startswith("#"):
continue
# ignore empty line
if not bool(line):
continue
# trim trailing comment
if "#" in line:
line = line[:-(line[::-1].index("#") + 1)].rstrip()
if line:
valid_lines.append(line)
else:
valid_lines.append(line)
md5 = hashlib.md5()
md5.update("\n".join(valid_lines).encode("utf-8"))
return md5.hexdigest()
DIR_HERE = os.path.abspath(os.path.dirname(__file__))
DIR_PROJECT_ROOT = DIR_HERE
DIR_CICD = DIR_HERE
DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos")
# config file path
PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"]
PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json")
DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"]
IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
# --- Load Configs ---
# detect runtime
class Runtime:
local = "local"
circleci = "circleci"
if os.environ.get("CIRCLECI"):
runtime = Runtime.circleci
else:
runtime = Runtime.local
# resolve config
if runtime == Runtime.local:
AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION")
AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE")
# set environment variable, allow pynamodb to detect credential
os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE
os.environ["AWS_DEFAULT_REGION"] = AWS_REGION
try:
DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD")
except Exception:
DOCKER_HUB_PASSWORD = ""
GIT_BRANCH = ""
elif runtime == Runtime.circleci:
AWS_REGION = os.environ["AWS_DEFAULT_REGION"]
AWS_PROFILE = None
DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"]
GIT_BRANCH = os.environ["CIRCLE_BRANCH"]
else:
raise NotImplementedError
logger = create_logger()
docker_client = docker.from_env()
# --- Image State DynamoDB backend
class ImageModel(Model):
class Meta:
table_name = "docker-image-state"
region = AWS_REGION
identifier = UnicodeAttribute(hash_key=True)
md5 = UnicodeAttribute()
last_update = UnicodeAttribute()
@property
def last_update_datetime(self):
"""
datetime type of ``last_update``
"""
return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f")
dockerhub_username = DOCKER_HUB_USERNAME # type: str
dir_repo_root = None # type: str
dir_tag_root = None # type: str
is_state_exists = None # type: bool
_repo_name = None # type: str
@property
def repo_name(self):
if self._repo_name is None:
self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip()
return self._repo_name
_tag_name = None # type: str
@property
def tag_name(self):
if self._tag_name is None:
self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip()
return self._tag_name
@property
def dockerfile_path(self):
return os.path.join(self.dir_tag_root, "Dockerfile")
def has_dockerfile(self):
return os.path.exists(self.dockerfile_path)
_dockerfile_md5 = None # type: str
@property
def dockerfile_md5(self):
if self._dockerfile_md5 is None:
self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path)
return self._dockerfile_md5
@property
def local_identifier(self):
return f"{self.repo_name}:{self.tag_name}"
@property
def dockerhub_identifier(self):
return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}"
@property
def awsecr_identifier(self):
raise NotImplementedError
@property
def smoke_test_script_path(self):
return os.path.join(self.dir_tag_root, "smoke-test.sh")
def run_docker_build(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Build docker image in context at {self.dir_tag_root} ...")
try:
run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root])
self.last_update = str(datetime.utcnow())
logger.info(" Build success!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Build failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_smoke_test(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Run smoke test script {self.smoke_test_script_path}...")
try:
run_and_log_command(["bash", self.smoke_test_script_path])
logger.info(" Test passed!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Test failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_docker_push(self, docker_client):
logger.info(f"Push docker image {self.identifier} ...")
if REGISTRY_SERVICE == "dockerhub":
remote_identifier = self.dockerhub_identifier
elif REGISTRY_SERVICE == "awsecr":
remote_identifier = self.awsecr_identifier
else:
raise ValueError
try:
run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier])
docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name)
logger.info(" Success!")
if self.is_state_exists:
self.update(
actions=[
ImageModel.md5.set(self.md5),
ImageModel.last_update.set(self.last_update)
]
)
else:
self.save()
return True
except subprocess.CalledProcessError as e:
logger.info(" Push failed!")
logger.info(" {}".format(e))
return False
except Exception as e:
logger.info(" {}".format(e))
return False
ImageModel.create_table(billing_mode="PAY_PER_REQUEST")
def plan_image_to_build():
"""
:rtype: typing.List[ImageModel]
:return:
"""
logger.info("Scan code repos to schedule docker builds ...")
image_list = list()
for repo_folder in os.listdir(DIR_REPOS):
dir_repo_root = os.path.join(DIR_REPOS, repo_folder)
if not os.path.isdir(dir_repo_root):
continue
for tag_folder in os.listdir(dir_repo_root):
dir_tag_root = os.path.join(dir_repo_root, tag_folder)
image = ImageModel()
image.dir_repo_root = dir_repo_root
image.dir_tag_root = dir_tag_root
if not image.has_dockerfile():
continue
logger.info(f" Detected '{image.local_identifier}' image")
try:
_image = ImageModel.get(image.local_identifier) # type: ImageModel
image.identifier = _image.identifier
image.md5 = _image.md5
image.last_update = _image.last_update
image.is_state_exists = True
if image.md5 == image.dockerfile_md5:
if (datetime.utcnow() - image.last_update_datetime).total_seconds() > IMAGE_REBUILD_INTERVAL:
is_todo = True
logger.info(
" Dockerfile not changed, but the image is outdated "
"according to the IMAGE_REBUILD_INTERVAL setting, we need to rebuild this one")
else:
is_todo = False
logger.info(" Dockerfile not changed and not older than the IMAGE_REBUILD_INTERVAL setting")
logger.info(" skip this image")
else:
is_todo = True
logger.info(" Dockerfile has changed, we need to rebuild the image")
except DoesNotExist:
logger.info(" State does not exist, we need to build this one")
is_todo = True
image.identifier = image.local_identifier
image.md5 = image.dockerfile_md5
image.is_state_exists = False
except Exception as e:
raise e
if is_todo:
image_list.append(image)
logger.info("--- build plan summary ---")
if len(image_list):
logger.info("we got these images to build")
for image in image_list:
logger.info(f" {image.local_identifier}")
else:
logger.info("we got NO image to build")
return image_list
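# Illustrative sketch (not part of the original workflow): the repos/ directory
# layout that plan_image_to_build() walks. All names below are hypothetical.
#
#   repos/
#       python/                  <- repo folder
#           repo_name            <- text file containing e.g. "python"
#           3.8/                 <- tag folder
#               tag_name         <- text file containing e.g. "3.8"
#               Dockerfile       <- required; tag folders without one are skipped
#               smoke-test.sh    <- run after a successful build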
def run_and_log_command(commands):
logger.info("Run >>> {}".format(" ".join(commands)))
subprocess.check_output(commands)
def run_build_image(image_list):
"""
Build and test Images.
:type image_list: typing.List[ImageModel]
:param image_list:
:rtype: typing.Tuple[typing.List[ImageModel]]
:return:
"""
success_image_list = list()
failed_image_list = list()
for image in image_list:
docker_build_success_flag = image.run_docker_build()
if not docker_build_success_flag:
failed_image_list.append(image)
continue
smoke_test_success_flag = image.run_smoke_test()
if not smoke_test_success_flag:
failed_image_list.append(image)
continue
success_image_list.append(image)
logger.info("--- docker build summary ---")
logger.info("following images succeeded:")
for image in success_image_list:
logger.info(f" {image.local_identifier}")
logger.info("following images failed:")
for image in failed_image_list:
logger.info(f" {image.local_identifier}")
return success_image_list, failed_image_list
def run_docker_push(image_list, docker_client):
"""
Push built images to registry.
:type image_list: typing.List[ImageModel]
:param image_list:
"""
logger.info("--- push image to registry ---")
if runtime == Runtime.local:
logger.info("Detected local runtime, stop here.")
return
if not len(image_list):
logger.info("No success image to push, stop here.")
return
if GIT_BRANCH != "master":
logger.info("Not master branch, stop here")
return
docker_client.login(username=DOCKER_HUB_USERNAME, password=DOCKER_HUB_PASSWORD)
for image in image_list:
image.run_docker_push(docker_client)
print("Finished.")
if __name__ == "__main__":
todo_image_list = plan_image_to_build()
success_image_list, failed_image_list = run_build_image(todo_image_list)
run_docker_push(success_image_list, docker_client)
| strip_comments | identifier_name |
build_all.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run docker build -> test -> push workflow for all repositories and images.
"""
import hashlib
import json
import logging
import os
import re
import subprocess
from datetime import datetime
import docker
from pynamodb.attributes import UnicodeAttribute
from pynamodb.exceptions import DoesNotExist
from pynamodb.models import Model
def create_logger():
logger = logging.getLogger("ci-runner")
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
return logger
# --- Json utility ---
def read_text(path):
with open(path, "rb") as f:
content = f.read().decode("utf-8")
return content
def strip_comment_line_with_symbol(line, start):
"""
Strip comments from line string.
"""
parts = line.split(start)
counts = [len(re.findall(r'(?:^|[^"\\]|(?:\\\\|\\")+)(")', part))
for part in parts]
total = 0
for nr, count in enumerate(counts):
total += count
if total % 2 == 0:
return start.join(parts[:nr + 1]).rstrip()
else: # pragma: no cover
return line.rstrip()
def strip_comments(string, comment_symbols=frozenset(('#', '//'))):
"""
Strip comments from json string.
:param string: A string containing json with comments started by comment_symbols.
:param comment_symbols: Iterable of symbols that start a line comment (default # or //).
:return: The string with the comments removed.
"""
lines = string.splitlines()
for k in range(len(lines)):
for symbol in comment_symbols:
lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol)
return '\n'.join(lines)
def read_json(file_path):
"""
Read dict data from json file
:type file_path: str
:rtype: dict
"""
return json.loads(strip_comments(read_text(file_path)))
def get_json_value(file_path, json_path):
"""
Read specific field from JSON file.
:type file_path: str
:param file_path: the absolute path for a json file
:type json_path: str
:param json_path: json path notation.
"""
# find absolute path
cwd = os.getcwd()
if not os.path.isabs(file_path):
file_path = os.path.abspath(os.path.join(cwd, file_path))
# fix json_path
if json_path.startswith("$."):
json_path = json_path.replace("$.", "", 1)
with open(file_path, "rb") as f:
data = json.loads(strip_comments(f.read().decode("utf-8")))
value = data
for part in json_path.split("."):
if part in value:
value = value[part]
else:
raise ValueError("'$.{}' not found in {}".format(json_path, file_path))
return value
def get_dockerfile_md5(dockerfile_path):
"""
Get the md5 checksum of a Dockerfile; comments, empty lines and trailing
whitespace are ignored.
:param dockerfile_path: the absolute path of the Dockerfile
:rtype: str
"""
valid_lines = list()
with open(dockerfile_path, "rb") as f:
lines = f.read().decode("utf-8").split("\n")
for line in lines:
line = line.rstrip()
# ignore comment line
if line.startswith("#"):
continue
# ignore empty line
if not bool(line):
continue
# trim trailing comment
if "#" in line:
line = line[:-(line[::-1].index("#") + 1)].rstrip()
if line:
valid_lines.append(line)
else:
valid_lines.append(line)
md5 = hashlib.md5()
md5.update("\n".join(valid_lines).encode("utf-8"))
return md5.hexdigest()
DIR_HERE = os.path.abspath(os.path.dirname(__file__))
DIR_PROJECT_ROOT = DIR_HERE
DIR_CICD = DIR_HERE
DIR_REPOS = os.path.join(DIR_PROJECT_ROOT, "repos")
# config file path
PATH_GLOBAL_CONFIG = os.path.join(DIR_PROJECT_ROOT, "config.json")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
REGISTRY_SERVICE = GLOBAL_CONFIG["REGISTRY_SERVICE"]
PATH_DOCKER_HUB_SECRET = os.path.join(DIR_PROJECT_ROOT, "docker-hub-secret.json")
DOCKER_HUB_USERNAME = GLOBAL_CONFIG["DOCKER_HUB_USERNAME"]
IMAGE_REBUILD_INTERVAL = get_json_value(PATH_GLOBAL_CONFIG, "IMAGE_REBUILD_INTERVAL")
GLOBAL_CONFIG = read_json(PATH_GLOBAL_CONFIG)
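# Illustrative sketch (not part of the original script): a config.json shape that
# satisfies the lookups above. Every value here is a hypothetical example.
#
# {
#     "REGISTRY_SERVICE": "dockerhub",        // or "awsecr"
#     "DOCKER_HUB_USERNAME": "example-user",
#     "IMAGE_REBUILD_INTERVAL": 604800,       // seconds, i.e. rebuild weekly
#     "AWS_REGION": "us-east-1",
#     "AWS_PROFILE": "default"
# }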
# --- Load Configs ---
# detect runtime
class Runtime:
local = "local"
circleci = "circleci"
if os.environ.get("CIRCLECI"):
runtime = Runtime.circleci
else:
runtime = Runtime.local
# resolve config
if runtime == Runtime.local:
AWS_REGION = get_json_value(PATH_GLOBAL_CONFIG, "AWS_REGION")
AWS_PROFILE = get_json_value(PATH_GLOBAL_CONFIG, "AWS_PROFILE")
# set environment variable, allow pynamodb to detect credential
os.environ["AWS_DEFAULT_PROFILE"] = AWS_PROFILE
os.environ["AWS_DEFAULT_REGION"] = AWS_REGION
try:
DOCKER_HUB_PASSWORD = get_json_value(PATH_DOCKER_HUB_SECRET, "PASSWORD")
except Exception:
DOCKER_HUB_PASSWORD = ""
GIT_BRANCH = ""
elif runtime == Runtime.circleci:
AWS_REGION = os.environ["AWS_DEFAULT_REGION"]
AWS_PROFILE = None
DOCKER_HUB_PASSWORD = os.environ["DOCKER_HUB_PASS"]
GIT_BRANCH = os.environ["CIRCLE_BRANCH"]
else:
raise NotImplementedError
logger = create_logger()
docker_client = docker.from_env()
# --- Image State DynamoDB backend
class ImageModel(Model):
class Meta:
table_name = "docker-image-state"
region = AWS_REGION
identifier = UnicodeAttribute(hash_key=True)
md5 = UnicodeAttribute()
last_update = UnicodeAttribute()
@property
def last_update_datetime(self):
|
dockerhub_username = DOCKER_HUB_USERNAME # type: str
dir_repo_root = None # type: str
dir_tag_root = None # type: str
is_state_exists = None # type: bool
_repo_name = None # type: str
@property
def repo_name(self):
if self._repo_name is None:
self._repo_name = read_text(os.path.join(self.dir_repo_root, "repo_name")).strip()
return self._repo_name
_tag_name = None # type: str
@property
def tag_name(self):
if self._tag_name is None:
self._tag_name = read_text(os.path.join(self.dir_tag_root, "tag_name")).strip()
return self._tag_name
@property
def dockerfile_path(self):
return os.path.join(self.dir_tag_root, "Dockerfile")
def has_dockerfile(self):
return os.path.exists(self.dockerfile_path)
_dockerfile_md5 = None # type: str
@property
def dockerfile_md5(self):
if self._dockerfile_md5 is None:
self._dockerfile_md5 = get_dockerfile_md5(self.dockerfile_path)
return self._dockerfile_md5
@property
def local_identifier(self):
return f"{self.repo_name}:{self.tag_name}"
@property
def dockerhub_identifier(self):
return f"{self.dockerhub_username}/{self.repo_name}:{self.tag_name}"
@property
def awsecr_identifier(self):
raise NotImplementedError
@property
def smoke_test_script_path(self):
return os.path.join(self.dir_tag_root, "smoke-test.sh")
def run_docker_build(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Build docker image in context at {self.dir_tag_root} ...")
try:
run_and_log_command(["docker", "build", "-t", self.local_identifier, self.dir_tag_root])
self.last_update = str(datetime.utcnow())
logger.info(" Build success!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Build failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_smoke_test(self):
"""
:rtype: bool
:return:
"""
logger.info(f"Run smoke test script {self.smoke_test_script_path}...")
try:
run_and_log_command(["bash", self.smoke_test_script_path])
logger.info(" Test passed!")
return True
except subprocess.CalledProcessError as e:
logger.info(" Test failed!")
logger.info(" {}".format(e))
return False
except Exception:
return False
def run_docker_push(self, docker_client):
logger.info(f"Push docker image {self.identifier} ...")
if REGISTRY_SERVICE == "dockerhub":
remote_identifier = self.dockerhub_identifier
elif REGISTRY_SERVICE == "awsecr":
remote_identifier = self.awsecr_identifier
else:
raise ValueError
try:
run_and_log_command(["docker", "tag", self.local_identifier, remote_identifier])
docker_client.push(f"{self.dockerhub_username}/{self.repo_name}", self.tag_name)
logger.info(" Success!")
if self.is_state_exists:
self.update(
actions=[
ImageModel.md5.set(self.md5),
ImageModel.last_update.set(self.last_update)
]
)
else:
self.save()
return True
except subprocess.CalledProcessError as e:
logger.info(" Push failed!")
logger.info(" {}".format(e))
return False
except Exception as e:
logger.info(" {}".format(e))
return False
ImageModel.create_table(billing_mode="PAY_PER_REQUEST")
def plan_image_to_build():
"""
:rtype: typing.List[ImageModel]
:return:
"""
logger.info("Scan code repos to schedule docker builds ...")
image_list = list()
for repo_folder in os.listdir(DIR_REPOS):
dir_repo_root = os.path.join(DIR_REPOS, repo_folder)
if not os.path.isdir(dir_repo_root):
continue
for tag_folder in os.listdir(dir_repo_root):
dir_tag_root = os.path.join(dir_repo_root, tag_folder)
image = ImageModel()
image.dir_repo_root = dir_repo_root
image.dir_tag_root = dir_tag_root
if not image.has_dockerfile():
continue
logger.info(f" Detected '{image.local_identifier}' image")
try:
_image = ImageModel.get(image.local_identifier) # type: ImageModel
image.identifier = _image.identifier
image.md5 = _image.md5
image.last_update = _image.last_update
image.is_state_exists = True
if image.md5 == image.dockerfile_md5:
if (datetime.utcnow() - image.last_update_datetime).total_seconds() > IMAGE_REBUILD_INTERVAL:
is_todo = True
logger.info(
" Dockerfile not changed, but the image is outdated "
"according to the IMAGE_REBUILD_INTERVAL setting, we need to rebuild this one")
else:
is_todo = False
logger.info(" Dockerfile not changed and not older than the IMAGE_REBUILD_INTERVAL setting")
logger.info(" skip this image")
else:
is_todo = True
logger.info(" Dockerfile has changed, we need to rebuild the image")
except DoesNotExist:
logger.info(" State does not exist, we need to build this one")
is_todo = True
image.identifier = image.local_identifier
image.md5 = image.dockerfile_md5
image.is_state_exists = False
except Exception as e:
raise e
if is_todo:
image_list.append(image)
logger.info("--- build plan summary ---")
if len(image_list):
logger.info("we got these images to build")
for image in image_list:
logger.info(f" {image.local_identifier}")
else:
logger.info("we got NO image to build")
return image_list
def run_and_log_command(commands):
logger.info("Run >>> {}".format(" ".join(commands)))
subprocess.check_output(commands)
def run_build_image(image_list):
"""
Build and test Images.
:type image_list: typing.List[ImageModel]
:param image_list:
:rtype: typing.Tuple[typing.List[ImageModel]]
:return:
"""
success_image_list = list()
failed_image_list = list()
for image in image_list:
docker_build_success_flag = image.run_docker_build()
if not docker_build_success_flag:
failed_image_list.append(image)
continue
smoke_test_success_flag = image.run_smoke_test()
if not smoke_test_success_flag:
failed_image_list.append(image)
continue
success_image_list.append(image)
logger.info("--- docker build summary ---")
logger.info("following images succeeded:")
for image in success_image_list:
logger.info(f" {image.local_identifier}")
logger.info("following images failed:")
for image in failed_image_list:
logger.info(f" {image.local_identifier}")
return success_image_list, failed_image_list
def run_docker_push(image_list, docker_client):
"""
Push built images to registry.
:type image_list: typing.List[ImageModel]
:param image_list:
"""
logger.info("--- push image to registry ---")
if runtime == Runtime.local:
logger.info("Detected local runtime, stop here.")
return
if not len(image_list):
logger.info("No success image to push, stop here.")
return
if GIT_BRANCH != "master":
logger.info("Not master branch, stop here")
return
docker_client.login(username=DOCKER_HUB_USERNAME, password=DOCKER_HUB_PASSWORD)
for image in image_list:
image.run_docker_push(docker_client)
print("Finished.")
if __name__ == "__main__":
todo_image_list = plan_image_to_build()
success_image_list, failed_image_list = run_build_image(todo_image_list)
run_docker_push(success_image_list, docker_client)
| """
datetime type of ``last_update``
"""
return datetime.strptime(self.last_update, "%Y-%m-%d %H:%M:%S.%f") | identifier_body |
admission_test.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedulingpolicy
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)
func TestNewAdmissionController(t *testing.T) {
tempfile, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("Unexpected error while creating temporary file: %v", err)
}
p := tempfile.Name()
defer os.Remove(p)
kubeconfig := `
clusters:
- name: foo
cluster:
server: https://example.com
users:
- name: alice
user:
token: deadbeef
contexts:
- name: default
context:
cluster: foo
user: alice
current-context: default
`
if _, err := tempfile.WriteString(kubeconfig); err != nil {
t.Fatalf("Unexpected error while writing test kubeconfig file: %v", err)
}
tests := []struct {
note string
input string
wantErr bool
}{
{"no config", "", true},
{"bad json", `{"foo": `, true},
{"bad yaml", `{foo" `, true},
{
"missing kubeconfig",
`{"foo": {}}`,
true,
},
{
"kubeconfig not found",
`{
"kubeconfig": "/kube-federation-scheduling-policy-file-not-found-test"
}`,
true,
},
{
"bad retry backoff",
fmt.Sprintf(`
{
"kubeconfig": %q,
"retryBackoff": -1
}
`, p),
true,
},
{
"a valid config",
fmt.Sprintf(`
{
"kubeconfig": %q
}
`, p),
false,
},
{
"a valid config with retry backoff",
fmt.Sprintf(`
{
"kubeconfig": %q,
"retryBackoff": 200
}
`, p),
false,
},
}
for _, tc := range tests {
var file io.Reader
if tc.input == "" {
file = nil
} else {
file = bytes.NewBufferString(tc.input)
}
_, err := newAdmissionController(file)
if tc.wantErr && err == nil {
t.Errorf("%v: Expected error", tc.note)
} else if !tc.wantErr && err != nil {
t.Errorf("%v: Unexpected error: %v", tc.note, err)
}
}
}
func TestAdmitQueryPayload(t *testing.T) {
var body interface{}
serve := func(w http.ResponseWriter, r *http.Request) {
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
t.Fatalf("Unexpected error reading admission payload: %v", err)
}
// No errors or annotations.
w.Write([]byte(`{}`))
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
rs := makeReplicaSet()
rs.Spec.MinReadySeconds = 100
attrs := makeAdmissionRecord(rs)
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
obj := body.(map[string]interface{})
metadata := obj["metadata"].(map[string]interface{})
spec := obj["spec"].(map[string]interface{})
name := metadata["name"].(string)
minReadySeconds := spec["minReadySeconds"].(float64)
expectedName := "myapp"
if name != expectedName {
t.Fatalf("Expected replicaset.metadata.name to be %v but got: %v", expectedName, name)
}
expectedMinReadySeconds := float64(100)
if minReadySeconds != expectedMinReadySeconds {
t.Fatalf("Expected replicaset.spec.minReadySeconds to be %v but got: %v", expectedMinReadySeconds, minReadySeconds)
}
}
func TestAdmitFailInternal(t *testing.T) {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}
controller, err := newControllerWithTestServer(serve, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
mockClient := &fake.Clientset{}
mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("unknown error")
})
controller.SetInternalKubeClientSet(mockClient)
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
}
func TestAdmitPolicyDoesNotExist(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(404)
}, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Expected admission controller to fail open but got error: %v", err)
}
}
func TestAdmitFailClosed(t *testing.T) {
tests := []struct {
note string
statusCode int
body string
}{
{"server error", 500, ""},
{"unmarshal error", 200, "{"},
{"undefined result", 404, ``},
{"policy errors", 200, `{"errors": ["conflicting replica-set-preferences"]}`},
}
for _, tc := range tests {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tc.statusCode)
if len(tc.body) > 0 |
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Errorf("%v: Unexpected error while creating test admission controller/server: %v", tc.note, err)
continue
}
obj := makeReplicaSet()
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err == nil {
t.Errorf("%v: Expected admission controller to fail closed", tc.note)
}
}
}
func TestAdmitRetries(t *testing.T) {
var numQueries int
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
numQueries++
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
err = controller.Admit(makeAdmissionRecord(makeReplicaSet()))
if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
if numQueries <= 1 {
t.Fatalf("Expected multiple queries/retries but got (numQueries): %v", numQueries)
}
}
func TestAdmitSuccessWithAnnotationMerge(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`
{
"annotations": {
"foo": "bar-2"
}
}
`))
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
obj := makeReplicaSet()
obj.Annotations = map[string]string{}
obj.Annotations["foo"] = "bar"
obj.Annotations["bar"] = "baz"
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
annotations := attrs.GetObject().(*extensionsv1.ReplicaSet).Annotations
expected := map[string]string{
"foo": "bar-2",
"bar": "baz",
}
if !reflect.DeepEqual(annotations, expected) {
t.Fatalf("Expected annotations to be %v but got: %v", expected, annotations)
}
}
func newControllerWithTestServer(f func(w http.ResponseWriter, r *http.Request), policiesExist bool) (*admissionController, error) {
server, err := newTestServer(f)
if err != nil {
return nil, err
}
kubeConfigFile, err := makeKubeConfigFile(server.URL, "/some/path/to/decision")
if err != nil {
return nil, err
}
defer os.Remove(kubeConfigFile)
configFile, err := makeAdmissionControlConfigFile(kubeConfigFile)
if err != nil {
return nil, err
}
defer os.Remove(configFile)
file, err := os.Open(configFile)
if err != nil {
return nil, err
}
controller, err := newAdmissionController(file)
if err != nil {
return nil, err
}
mockClient := &fake.Clientset{}
var items []api.ConfigMap
if policiesExist {
items = append(items, api.ConfigMap{})
}
mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
if action.GetNamespace() == policyConfigMapNamespace {
return true, &api.ConfigMapList{Items: items}, nil
}
return true, nil, nil
})
controller.SetInternalKubeClientSet(mockClient)
return controller, nil
}
func newTestServer(f func(w http.ResponseWriter, r *http.Request)) (*httptest.Server, error) {
server := httptest.NewUnstartedServer(http.HandlerFunc(f))
server.Start()
return server, nil
}
func makeAdmissionControlConfigFile(kubeConfigFile string) (string, error) {
tempfile, err := ioutil.TempFile("", "")
if err != nil {
return "", err
}
p := tempfile.Name()
configFileTmpl := `
kubeconfig: {{ .KubeConfigFile }}
retryBackoff: {{ .RetryBackoff }}
`
type configFileTemplateInput struct {
KubeConfigFile string
RetryBackoff int
}
input := configFileTemplateInput{
KubeConfigFile: kubeConfigFile,
RetryBackoff: 1,
}
tmpl, err := template.New("scheduling-policy-config").Parse(configFileTmpl)
if err != nil {
return "", err
}
if err := tmpl.Execute(tempfile, input); err != nil {
return "", err
}
return p, nil
}
func makeKubeConfigFile(baseURL, path string) (string, error) {
tempfile, err := ioutil.TempFile("", "")
if err != nil {
return "", err
}
p := tempfile.Name()
kubeConfigTmpl := `
clusters:
- name: test
cluster:
server: {{ .BaseURL }}{{ .Path }}
users:
- name: alice
user:
token: deadbeef
contexts:
- name: default
context:
cluster: test
user: alice
current-context: default`
type kubeConfigTemplateInput struct {
BaseURL string
Path string
}
input := kubeConfigTemplateInput{
BaseURL: baseURL,
Path: path,
}
tmpl, err := template.New("kubeconfig").Parse(kubeConfigTmpl)
if err != nil {
return "", err
}
if err := tmpl.Execute(tempfile, input); err != nil {
return "", err
}
return p, nil
}
func makeAdmissionRecord(obj *extensionsv1.ReplicaSet) admission.Attributes {
return admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
}
func makeReplicaSet() *extensionsv1.ReplicaSet {
return &extensionsv1.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "extensions/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "myapp",
},
Spec: extensionsv1.ReplicaSetSpec{},
}
}
| {
w.Write([]byte(tc.body))
} | conditional_block |
admission_test.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedulingpolicy
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)
func TestNewAdmissionController(t *testing.T) {
tempfile, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("Unexpected error while creating temporary file: %v", err)
}
p := tempfile.Name()
defer os.Remove(p)
kubeconfig := `
clusters:
- name: foo
cluster:
server: https://example.com
users:
- name: alice
user:
token: deadbeef
contexts:
- name: default
context:
cluster: foo
user: alice
current-context: default
`
if _, err := tempfile.WriteString(kubeconfig); err != nil {
t.Fatalf("Unexpected error while writing test kubeconfig file: %v", err)
}
tests := []struct {
note string
input string
wantErr bool
}{
{"no config", "", true},
{"bad json", `{"foo": `, true},
{"bad yaml", `{foo" `, true},
{
"missing kubeconfig",
`{"foo": {}}`,
true,
},
{
"kubeconfig not found",
`{
"kubeconfig": "/kube-federation-scheduling-policy-file-not-found-test"
}`,
true,
},
{
"bad retry backoff",
fmt.Sprintf(`
{
"kubeconfig": %q,
"retryBackoff": -1
}
`, p),
true,
},
{
"a valid config",
fmt.Sprintf(`
{
"kubeconfig": %q
}
`, p),
false,
},
{
"a valid config with retry backoff",
fmt.Sprintf(`
{
"kubeconfig": %q,
"retryBackoff": 200
}
`, p),
false,
},
}
for _, tc := range tests {
var file io.Reader
if tc.input == "" {
file = nil
} else {
file = bytes.NewBufferString(tc.input)
}
_, err := newAdmissionController(file)
if tc.wantErr && err == nil {
t.Errorf("%v: Expected error", tc.note)
} else if !tc.wantErr && err != nil {
t.Errorf("%v: Unexpected error: %v", tc.note, err)
}
}
}
func TestAdmitQueryPayload(t *testing.T) {
var body interface{}
serve := func(w http.ResponseWriter, r *http.Request) {
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
t.Fatalf("Unexpected error reading admission payload: %v", err)
}
// No errors or annotations.
w.Write([]byte(`{}`))
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
rs := makeReplicaSet()
rs.Spec.MinReadySeconds = 100
attrs := makeAdmissionRecord(rs)
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
obj := body.(map[string]interface{})
metadata := obj["metadata"].(map[string]interface{})
spec := obj["spec"].(map[string]interface{})
name := metadata["name"].(string)
minReadySeconds := spec["minReadySeconds"].(float64)
expectedName := "myapp"
if name != expectedName {
t.Fatalf("Expected replicaset.metadata.name to be %v but got: %v", expectedName, name)
}
expectedMinReadySeconds := float64(100)
if minReadySeconds != expectedMinReadySeconds {
t.Fatalf("Expected replicaset.spec.minReadySeconds to be %v but got: %v", expectedMinReadySeconds, minReadySeconds)
}
}
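// TestAdmitFailInternal verifies that the controller fails closed when listing policy ConfigMaps
// returns an internal error.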
func TestAdmitFailInternal(t *testing.T) {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}
controller, err := newControllerWithTestServer(serve, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
mockClient := &fake.Clientset{}
mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("unknown error")
})
controller.SetInternalKubeClientSet(mockClient)
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
}
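// TestAdmitPolicyDoesNotExist verifies that the controller fails open when no policy ConfigMaps
// exist and the policy endpoint returns 404.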
func TestAdmitPolicyDoesNotExist(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(404)
}, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Expected admission controller to fail open but got error: %v", err)
}
}
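// TestAdmitFailClosed verifies that backend failures (server errors, malformed responses,
// missing decisions, policy errors) cause admission to fail closed when policies exist.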
func TestAdmitFailClosed(t *testing.T) {
tests := []struct {
note string
statusCode int
body string
}{
{"server error", 500, ""},
{"unmarshal error", 200, "{"},
{"undefined result", 404, ``},
{"policy errors", 200, `{"errors": ["conflicting replica-set-preferences"]}`},
}
for _, tc := range tests {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tc.statusCode)
if len(tc.body) > 0 {
w.Write([]byte(tc.body))
}
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Errorf("%v: Unexpected error while creating test admission controller/server: %v", tc.note, err)
continue
}
obj := makeReplicaSet()
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err == nil {
t.Errorf("%v: Expected admission controller to fail closed", tc.note)
}
}
}
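// TestAdmitRetries verifies that a persistently failing policy endpoint is queried more than once
// before admission finally fails closed.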
func TestAdmitRetries(t *testing.T) {
var numQueries int
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
numQueries++
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
err = controller.Admit(makeAdmissionRecord(makeReplicaSet()))
if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
if numQueries <= 1 {
t.Fatalf("Expected multiple queries/retries but got (numQueries): %v", numQueries)
}
}
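// TestAdmitSuccessWithAnnotationMerge verifies that annotations returned by the policy engine are
// merged into the object's annotations, overriding existing keys of the same name.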
func TestAdmitSuccessWithAnnotationMerge(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`
{
"annotations": {
"foo": "bar-2"
}
}
`))
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
obj := makeReplicaSet()
obj.Annotations = map[string]string{}
obj.Annotations["foo"] = "bar"
obj.Annotations["bar"] = "baz"
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
annotations := attrs.GetObject().(*extensionsv1.ReplicaSet).Annotations
expected := map[string]string{
"foo": "bar-2",
"bar": "baz",
}
if !reflect.DeepEqual(annotations, expected) {
t.Fatalf("Expected annotations to be %v but got: %v", expected, annotations)
}
}
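// newControllerWithTestServer builds an admissionController wired to an httptest server and a fake
// clientset whose ConfigMap list reports a policy only when policiesExist is true.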
func newControllerWithTestServer(f func(w http.ResponseWriter, r *http.Request), policiesExist bool) (*admissionController, error) {
server, err := newTestServer(f)
if err != nil {
return nil, err
}
kubeConfigFile, err := makeKubeConfigFile(server.URL, "/some/path/to/decision")
if err != nil {
return nil, err |
configFile, err := makeAdmissionControlConfigFile(kubeConfigFile)
if err != nil {
return nil, err
}
defer os.Remove(configFile)
file, err := os.Open(configFile)
if err != nil {
return nil, err
}
controller, err := newAdmissionController(file)
if err != nil {
return nil, err
}
mockClient := &fake.Clientset{}
var items []api.ConfigMap
if policiesExist {
items = append(items, api.ConfigMap{})
}
mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
if action.GetNamespace() == policyConfigMapNamespace {
return true, &api.ConfigMapList{Items: items}, nil
}
return true, nil, nil
})
controller.SetInternalKubeClientSet(mockClient)
return controller, nil
}
func newTestServer(f func(w http.ResponseWriter, r *http.Request)) (*httptest.Server, error) {
server := httptest.NewUnstartedServer(http.HandlerFunc(f))
server.Start()
return server, nil
}
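// makeAdmissionControlConfigFile writes a temporary admission controller config file referencing
// the given kubeconfig; the caller is responsible for removing it.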
func makeAdmissionControlConfigFile(kubeConfigFile string) (string, error) {
tempfile, err := ioutil.TempFile("", "")
if err != nil {
return "", err
}
p := tempfile.Name()
configFileTmpl := `
kubeconfig: {{ .KubeConfigFile }}
retryBackoff: {{ .RetryBackoff }}
`
type configFileTemplateInput struct {
KubeConfigFile string
RetryBackoff int
}
input := configFileTemplateInput{
KubeConfigFile: kubeConfigFile,
RetryBackoff: 1,
}
tmpl, err := template.New("scheduling-policy-config").Parse(configFileTmpl)
if err != nil {
return "", err
}
if err := tmpl.Execute(tempfile, input); err != nil {
return "", err
}
return p, nil
}
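// makeKubeConfigFile writes a temporary kubeconfig whose cluster server points at baseURL+path;
// the caller is responsible for removing it.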
func makeKubeConfigFile(baseURL, path string) (string, error) {
tempfile, err := ioutil.TempFile("", "")
if err != nil {
return "", err
}
p := tempfile.Name()
kubeConfigTmpl := `
clusters:
- name: test
cluster:
server: {{ .BaseURL }}{{ .Path }}
users:
- name: alice
user:
token: deadbeef
contexts:
- name: default
context:
cluster: test
user: alice
current-context: default`
type kubeConfigTemplateInput struct {
BaseURL string
Path string
}
input := kubeConfigTemplateInput{
BaseURL: baseURL,
Path: path,
}
tmpl, err := template.New("kubeconfig").Parse(kubeConfigTmpl)
if err != nil {
return "", err
}
if err := tmpl.Execute(tempfile, input); err != nil {
return "", err
}
return p, nil
}
func makeAdmissionRecord(obj *extensionsv1.ReplicaSet) admission.Attributes {
return admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
}
func makeReplicaSet() *extensionsv1.ReplicaSet {
return &extensionsv1.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
APIVersion: "extensions/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "myapp",
},
Spec: extensionsv1.ReplicaSetSpec{},
}
} | }
defer os.Remove(kubeConfigFile) | random_line_split |