file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
server.go | // Copyright (c) 2017-2020 VMware, Inc. or its affiliates
// SPDX-License-Identifier: Apache-2.0
package hub
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/greenplum-db/gp-common-go-libs/gplog"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"golang.org/x/xerrors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/reflection"
grpcStatus "google.golang.org/grpc/status"
"github.com/greenplum-db/gpupgrade/greenplum"
"github.com/greenplum-db/gpupgrade/idl"
"github.com/greenplum-db/gpupgrade/upgrade"
"github.com/greenplum-db/gpupgrade/utils"
"github.com/greenplum-db/gpupgrade/utils/daemon"
"github.com/greenplum-db/gpupgrade/utils/log"
)
var DialTimeout = 3 * time.Second
// Returned from Server.Start() if Server.Stop() has already been called.
var ErrHubStopped = errors.New("hub is stopped")
type Dialer func(ctx context.Context, target string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
type Server struct {
*Config
StateDir string
agentConns []*Connection
grpcDialer Dialer
mu sync.Mutex
server *grpc.Server
lis net.Listener
// This is used both as a channel to communicate from Start() to
// Stop() to indicate to Stop() that it can finally terminate
// and also as a flag to communicate from Stop() to Start() that
// Stop() had already been called, so no need to do anything further
// in Start().
// Note that when used as a flag, nil value means that Stop() has
// been called.
stopped chan struct{}
daemon bool
}
type Connection struct {
Conn *grpc.ClientConn
AgentClient idl.AgentClient
Hostname string
CancelContext func()
}
func New(conf *Config, grpcDialer Dialer, stateDir string) *Server {
h := &Server{
Config: conf,
StateDir: stateDir,
stopped: make(chan struct{}, 1),
grpcDialer: grpcDialer,
}
return h
}
// MakeDaemon tells the Server to disconnect its stdout/stderr streams after
// successfully starting up.
func (s *Server) MakeDaemon() {
s.daemon = true
}
func (s *Server) Start() error {
lis, err := net.Listen("tcp", ":"+strconv.Itoa(s.Port))
if err != nil {
return xerrors.Errorf("listen on port %d: %w", s.Port, err)
}
// Set up an interceptor function to log any panics we get from request
// handlers.
interceptor := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
defer log.WritePanics()
return handler(ctx, req)
}
server := grpc.NewServer(grpc.UnaryInterceptor(interceptor))
s.mu.Lock()
if s.stopped == nil {
// Stop() has already been called; return without serving.
s.mu.Unlock()
return ErrHubStopped
}
s.server = server
s.lis = lis
s.mu.Unlock()
idl.RegisterCliToHubServer(server, s)
reflection.Register(server)
if s.daemon {
fmt.Printf("Hub started on port %d (pid %d)\n", s.Port, os.Getpid())
daemon.Daemonize()
}
err = server.Serve(lis)
if err != nil {
err = xerrors.Errorf("serve: %w", err)
}
// inform Stop() that it is OK to stop now
s.stopped <- struct{}{}
return err
}
func (s *Server) StopServices(ctx context.Context, in *idl.StopServicesRequest) (*idl.StopServicesReply, error) {
err := s.StopAgents()
if err != nil {
gplog.Debug("failed to stop agents: %#v", err)
}
s.Stop(false)
return &idl.StopServicesReply{}, nil
}
// TODO: add unit tests for this; this is currently tricky due to h.AgentConns()
// mutating global state
func (s *Server) StopAgents() error {
// FIXME: s.AgentConns() fails fast if a single agent isn't available
// we need to connect to all available agents so we can stop just those
_, err := s.AgentConns()
if err != nil {
return err
}
var wg sync.WaitGroup
errs := make(chan error, len(s.agentConns))
for _, conn := range s.agentConns {
conn := conn
wg.Add(1)
go func() {
defer wg.Done()
_, err := conn.AgentClient.StopAgent(context.Background(), &idl.StopAgentRequest{})
if err == nil { // no error means the agent did not terminate as expected
errs <- xerrors.Errorf("failed to stop agent on host: %s", conn.Hostname)
return
}
// XXX: "transport is closing" is not documented but is needed to uniquely interpret codes.Unavailable
// https://github.com/grpc/grpc/blob/v1.24.0/doc/statuscodes.md
errStatus := grpcStatus.Convert(err)
if errStatus.Code() != codes.Unavailable || errStatus.Message() != "transport is closing" {
errs <- xerrors.Errorf("failed to stop agent on host %s : %w", conn.Hostname, err)
}
}()
}
wg.Wait()
close(errs)
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return multiErr.ErrorOrNil()
}
func (s *Server) Stop(closeAgentConns bool) {
s.mu.Lock()
defer s.mu.Unlock()
// StopServices calls Stop(false) because it has already closed the agentConns
if closeAgentConns {
s.closeAgentConns()
}
if s.server != nil {
s.server.Stop()
<-s.stopped // block until it is OK to stop
}
// Mark this server stopped so that a concurrent Start() doesn't try to
// start things up again.
s.stopped = nil
}
func (s *Server) RestartAgents(ctx context.Context, in *idl.RestartAgentsRequest) (*idl.RestartAgentsReply, error) {
restartedHosts, err := RestartAgents(ctx, nil, AgentHosts(s.Source), s.AgentPort, s.StateDir)
return &idl.RestartAgentsReply{AgentHosts: restartedHosts}, err
}
func RestartAgents(ctx context.Context,
dialer func(context.Context, string) (net.Conn, error),
hostnames []string,
port int,
stateDir string) ([]string, error) {
var wg sync.WaitGroup
restartedHosts := make(chan string, len(hostnames))
errs := make(chan error, len(hostnames))
for _, host := range hostnames {
wg.Add(1)
go func(host string) {
defer wg.Done()
address := host + ":" + strconv.Itoa(port)
timeoutCtx, cancelFunc := context.WithTimeout(ctx, 3*time.Second)
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.FailOnNonTempDialError(true),
}
if dialer != nil {
opts = append(opts, grpc.WithContextDialer(dialer))
}
conn, err := grpc.DialContext(timeoutCtx, address, opts...)
cancelFunc()
if err == nil {
err = conn.Close()
if err != nil {
gplog.Error("failed to close agent connection to %s: %+v", host, err)
}
return
}
gplog.Debug("failed to dial agent on %s: %+v", host, err)
gplog.Info("starting agent on %s", host)
agentPath, err := getAgentPath()
if err != nil {
errs <- err
return
}
cmd := execCommand("ssh", host,
fmt.Sprintf("bash -c \"%s agent --daemonize --port %d --state-directory %s\"", agentPath, port, stateDir))
stdout, err := cmd.Output()
if err != nil {
errs <- err
return
}
gplog.Debug(string(stdout))
restartedHosts <- host
}(host)
}
wg.Wait()
close(errs)
close(restartedHosts)
var hosts []string
for h := range restartedHosts {
hosts = append(hosts, h)
}
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return hosts, multiErr.ErrorOrNil()
}
func (s *Server) AgentConns() ([]*Connection, error) {
// Lock the mutex to protect against races with Server.Stop().
// XXX This is a *ridiculously* broad lock. Have fun waiting for the dial
// timeout when calling Stop() and AgentConns() at the same time, for
// instance. We should not lock around a network operation, but it seems
// like the AgentConns concept is not long for this world anyway.
s.mu.Lock()
defer s.mu.Unlock()
if s.agentConns != nil {
err := EnsureConnsAreReady(s.agentConns)
if err != nil {
gplog.Error("ensureConnsAreReady failed: %s", err)
return nil, err
}
return s.agentConns, nil
}
hostnames := AgentHosts(s.Source)
for _, host := range hostnames {
ctx, cancelFunc := context.WithTimeout(context.Background(), DialTimeout)
conn, err := s.grpcDialer(ctx,
host+":"+strconv.Itoa(s.AgentPort),
grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
err = xerrors.Errorf("grpcDialer failed: %w", err)
gplog.Error(err.Error())
cancelFunc()
return nil, err
}
s.agentConns = append(s.agentConns, &Connection{
Conn: conn,
AgentClient: idl.NewAgentClient(conn),
Hostname: host,
CancelContext: cancelFunc,
})
}
return s.agentConns, nil
}
func EnsureConnsAreReady(agentConns []*Connection) error {
hostnames := []string{}
for _, conn := range agentConns {
if conn.Conn.GetState() != connectivity.Ready {
hostnames = append(hostnames, conn.Hostname)
}
}
if len(hostnames) > 0 {
return fmt.Errorf("the connections to the following hosts were not ready: %s", strings.Join(hostnames, ","))
}
return nil
}
// Closes all h.agentConns. Callers must hold the Server's mutex.
// TODO: this function assumes that all h.agentConns are _not_ in a terminal
// state (e.g. already closed). If so, conn.Conn.WaitForStateChange() can block
// indefinitely.
func (s *Server) closeAgentConns() {
for _, conn := range s.agentConns {
defer conn.CancelContext()
currState := conn.Conn.GetState()
err := conn.Conn.Close()
if err != nil {
gplog.Info(fmt.Sprintf("Error closing hub to agent connection. host: %s, err: %s", conn.Hostname, err.Error()))
}
conn.Conn.WaitForStateChange(context.Background(), currState)
}
}
type InitializeConfig struct {
Standby greenplum.SegConfig
Master greenplum.SegConfig
Primaries []greenplum.SegConfig
Mirrors []greenplum.SegConfig
}
// Config contains all the information that will be persisted to and loaded
// from disk during calls to Save() and Load().
type Config struct {
Source *greenplum.Cluster
Target *greenplum.Cluster
// TargetInitializeConfig contains all the info needed to initialize the
// target cluster's master, standby, primaries and mirrors.
TargetInitializeConfig InitializeConfig
Port int
AgentPort int
UseLinkMode bool
UpgradeID upgrade.ID
// Tablespaces contains the tablespaces in the database, keyed by
// dbid and tablespace OID.
Tablespaces greenplum.Tablespaces
TablespacesMappingFilePath string
}
func (c *Config) Load(r io.Reader) error {
dec := json.NewDecoder(r)
return dec.Decode(c)
}
func (c *Config) Save(w io.Writer) error {
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(c)
}
// SaveConfig persists the hub's configuration to disk.
func (s *Server) SaveConfig() (err error) {
// TODO: Switch to an atomic implementation like renameio. Consider what
// happens if Config.Save() panics: we'll have truncated the file
// on disk and the hub will be unable to recover. For now, since we normally
// only save the configuration during initialize and any configuration
// errors could be fixed by reinitializing, the risk seems small.
file, err := utils.System.Create(upgrade.GetConfigFile())
if err != nil {
return err
}
defer func() {
if cerr := file.Close(); cerr != nil {
cerr = xerrors.Errorf("closing hub configuration: %w", cerr)
err = multierror.Append(err, cerr).ErrorOrNil()
}
}()
err = s.Config.Save(file)
if err != nil |
return nil
}
func LoadConfig(conf *Config, path string) error {
file, err := os.Open(path)
if err != nil {
return xerrors.Errorf("opening configuration file: %w", err)
}
defer file.Close()
err = conf.Load(file)
if err != nil {
return xerrors.Errorf("reading configuration file: %w", err)
}
return nil
}
func AgentHosts(c *greenplum.Cluster) []string {
uniqueHosts := make(map[string]bool)
excludingMaster := func(seg *greenplum.SegConfig) bool {
return !seg.IsMaster()
}
for _, seg := range c.SelectSegments(excludingMaster) {
uniqueHosts[seg.Hostname] = true
}
hosts := make([]string, 0)
for host := range uniqueHosts {
hosts = append(hosts, host)
}
return hosts
}
func MakeTargetClusterMessage(target *greenplum.Cluster) *idl.Message {
data := make(map[string]string)
data[idl.ResponseKey_target_port.String()] = strconv.Itoa(target.MasterPort())
data[idl.ResponseKey_target_master_data_directory.String()] = target.MasterDataDir()
return &idl.Message{
Contents: &idl.Message_Response{
Response: &idl.Response{Data: data},
},
}
}
| {
return xerrors.Errorf("saving hub configuration: %w", err)
} | conditional_block |
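Each row above pairs a prefix, a suffix, and the masked middle with a fim_type label. As a rough illustration of how such a record could be turned into a fill-in-the-middle training string, the Go sketch below lays the fields out in prefix-suffix-middle (PSM) order. The struct name, the sentinel tokens (<fim_prefix>, <fim_suffix>, <fim_middle>), and the PSM ordering are assumptions for illustration only; this dataset does not specify them.

```go
package main

import "fmt"

// fimRecord mirrors the columns shown in this dataset: file_name, prefix,
// suffix, middle, and fim_type.
type fimRecord struct {
	FileName string
	Prefix   string
	Suffix   string
	Middle   string
	FimType  string // e.g. "conditional_block", "identifier_body", "random_line_split"
}

// assemblePSM concatenates the record in prefix-suffix-middle order with
// sentinel tokens. The sentinel strings are placeholders; substitute whatever
// special tokens the target tokenizer actually defines.
func assemblePSM(r fimRecord) string {
	return "<fim_prefix>" + r.Prefix + "<fim_suffix>" + r.Suffix + "<fim_middle>" + r.Middle
}

func main() {
	r := fimRecord{
		FileName: "server.go",
		Prefix:   "err = s.Config.Save(file)\nif err != nil ",
		Suffix:   "\nreturn nil\n}",
		Middle:   "{\n\treturn xerrors.Errorf(\"saving hub configuration: %w\", err)\n}",
		FimType:  "conditional_block",
	}
	fmt.Println(assemblePSM(r))
}
```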
server.go | // Copyright (c) 2017-2020 VMware, Inc. or its affiliates
// SPDX-License-Identifier: Apache-2.0
package hub
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/greenplum-db/gp-common-go-libs/gplog"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"golang.org/x/xerrors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/reflection"
grpcStatus "google.golang.org/grpc/status"
"github.com/greenplum-db/gpupgrade/greenplum"
"github.com/greenplum-db/gpupgrade/idl"
"github.com/greenplum-db/gpupgrade/upgrade"
"github.com/greenplum-db/gpupgrade/utils"
"github.com/greenplum-db/gpupgrade/utils/daemon"
"github.com/greenplum-db/gpupgrade/utils/log"
)
var DialTimeout = 3 * time.Second
// Returned from Server.Start() if Server.Stop() has already been called.
var ErrHubStopped = errors.New("hub is stopped")
type Dialer func(ctx context.Context, target string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
type Server struct {
*Config
StateDir string
agentConns []*Connection
grpcDialer Dialer
mu sync.Mutex
server *grpc.Server
lis net.Listener
// This is used both as a channel to communicate from Start() to
// Stop() to indicate to Stop() that it can finally terminate
// and also as a flag to communicate from Stop() to Start() that
// Stop() had already been called, so no need to do anything further
// in Start().
// Note that when used as a flag, nil value means that Stop() has
// been called.
stopped chan struct{}
daemon bool
}
type Connection struct {
Conn *grpc.ClientConn
AgentClient idl.AgentClient
Hostname string
CancelContext func()
}
func New(conf *Config, grpcDialer Dialer, stateDir string) *Server {
h := &Server{
Config: conf,
StateDir: stateDir,
stopped: make(chan struct{}, 1),
grpcDialer: grpcDialer,
}
return h
}
// MakeDaemon tells the Server to disconnect its stdout/stderr streams after
// successfully starting up.
func (s *Server) MakeDaemon() {
s.daemon = true
}
func (s *Server) Start() error {
lis, err := net.Listen("tcp", ":"+strconv.Itoa(s.Port))
if err != nil {
return xerrors.Errorf("listen on port %d: %w", s.Port, err)
}
// Set up an interceptor function to log any panics we get from request
// handlers.
interceptor := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
defer log.WritePanics()
return handler(ctx, req)
}
server := grpc.NewServer(grpc.UnaryInterceptor(interceptor))
s.mu.Lock()
if s.stopped == nil {
// Stop() has already been called; return without serving.
s.mu.Unlock()
return ErrHubStopped
}
s.server = server
s.lis = lis
s.mu.Unlock()
idl.RegisterCliToHubServer(server, s)
reflection.Register(server)
if s.daemon {
fmt.Printf("Hub started on port %d (pid %d)\n", s.Port, os.Getpid())
daemon.Daemonize()
}
err = server.Serve(lis)
if err != nil {
err = xerrors.Errorf("serve: %w", err)
}
// inform Stop() that it is OK to stop now
s.stopped <- struct{}{}
return err
}
func (s *Server) StopServices(ctx context.Context, in *idl.StopServicesRequest) (*idl.StopServicesReply, error) {
err := s.StopAgents()
if err != nil {
gplog.Debug("failed to stop agents: %#v", err)
}
s.Stop(false)
return &idl.StopServicesReply{}, nil
}
// TODO: add unit tests for this; this is currently tricky due to h.AgentConns()
// mutating global state
func (s *Server) StopAgents() error {
// FIXME: s.AgentConns() fails fast if a single agent isn't available
// we need to connect to all available agents so we can stop just those
_, err := s.AgentConns()
if err != nil {
return err
}
var wg sync.WaitGroup
errs := make(chan error, len(s.agentConns))
for _, conn := range s.agentConns {
conn := conn
wg.Add(1)
go func() {
defer wg.Done()
_, err := conn.AgentClient.StopAgent(context.Background(), &idl.StopAgentRequest{})
if err == nil { // no error means the agent did not terminate as expected
errs <- xerrors.Errorf("failed to stop agent on host: %s", conn.Hostname)
return
}
// XXX: "transport is closing" is not documented but is needed to uniquely interpret codes.Unavailable
// https://github.com/grpc/grpc/blob/v1.24.0/doc/statuscodes.md
errStatus := grpcStatus.Convert(err)
if errStatus.Code() != codes.Unavailable || errStatus.Message() != "transport is closing" {
errs <- xerrors.Errorf("failed to stop agent on host %s : %w", conn.Hostname, err)
}
}()
}
wg.Wait()
close(errs)
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return multiErr.ErrorOrNil()
}
func (s *Server) Stop(closeAgentConns bool) |
func (s *Server) RestartAgents(ctx context.Context, in *idl.RestartAgentsRequest) (*idl.RestartAgentsReply, error) {
restartedHosts, err := RestartAgents(ctx, nil, AgentHosts(s.Source), s.AgentPort, s.StateDir)
return &idl.RestartAgentsReply{AgentHosts: restartedHosts}, err
}
func RestartAgents(ctx context.Context,
dialer func(context.Context, string) (net.Conn, error),
hostnames []string,
port int,
stateDir string) ([]string, error) {
var wg sync.WaitGroup
restartedHosts := make(chan string, len(hostnames))
errs := make(chan error, len(hostnames))
for _, host := range hostnames {
wg.Add(1)
go func(host string) {
defer wg.Done()
address := host + ":" + strconv.Itoa(port)
timeoutCtx, cancelFunc := context.WithTimeout(ctx, 3*time.Second)
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.FailOnNonTempDialError(true),
}
if dialer != nil {
opts = append(opts, grpc.WithContextDialer(dialer))
}
conn, err := grpc.DialContext(timeoutCtx, address, opts...)
cancelFunc()
if err == nil {
err = conn.Close()
if err != nil {
gplog.Error("failed to close agent connection to %s: %+v", host, err)
}
return
}
gplog.Debug("failed to dial agent on %s: %+v", host, err)
gplog.Info("starting agent on %s", host)
agentPath, err := getAgentPath()
if err != nil {
errs <- err
return
}
cmd := execCommand("ssh", host,
fmt.Sprintf("bash -c \"%s agent --daemonize --port %d --state-directory %s\"", agentPath, port, stateDir))
stdout, err := cmd.Output()
if err != nil {
errs <- err
return
}
gplog.Debug(string(stdout))
restartedHosts <- host
}(host)
}
wg.Wait()
close(errs)
close(restartedHosts)
var hosts []string
for h := range restartedHosts {
hosts = append(hosts, h)
}
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return hosts, multiErr.ErrorOrNil()
}
func (s *Server) AgentConns() ([]*Connection, error) {
// Lock the mutex to protect against races with Server.Stop().
// XXX This is a *ridiculously* broad lock. Have fun waiting for the dial
// timeout when calling Stop() and AgentConns() at the same time, for
// instance. We should not lock around a network operation, but it seems
// like the AgentConns concept is not long for this world anyway.
s.mu.Lock()
defer s.mu.Unlock()
if s.agentConns != nil {
err := EnsureConnsAreReady(s.agentConns)
if err != nil {
gplog.Error("ensureConnsAreReady failed: %s", err)
return nil, err
}
return s.agentConns, nil
}
hostnames := AgentHosts(s.Source)
for _, host := range hostnames {
ctx, cancelFunc := context.WithTimeout(context.Background(), DialTimeout)
conn, err := s.grpcDialer(ctx,
host+":"+strconv.Itoa(s.AgentPort),
grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
err = xerrors.Errorf("grpcDialer failed: %w", err)
gplog.Error(err.Error())
cancelFunc()
return nil, err
}
s.agentConns = append(s.agentConns, &Connection{
Conn: conn,
AgentClient: idl.NewAgentClient(conn),
Hostname: host,
CancelContext: cancelFunc,
})
}
return s.agentConns, nil
}
func EnsureConnsAreReady(agentConns []*Connection) error {
hostnames := []string{}
for _, conn := range agentConns {
if conn.Conn.GetState() != connectivity.Ready {
hostnames = append(hostnames, conn.Hostname)
}
}
if len(hostnames) > 0 {
return fmt.Errorf("the connections to the following hosts were not ready: %s", strings.Join(hostnames, ","))
}
return nil
}
// Closes all h.agentConns. Callers must hold the Server's mutex.
// TODO: this function assumes that all h.agentConns are _not_ in a terminal
// state (e.g. already closed). If so, conn.Conn.WaitForStateChange() can block
// indefinitely.
func (s *Server) closeAgentConns() {
for _, conn := range s.agentConns {
defer conn.CancelContext()
currState := conn.Conn.GetState()
err := conn.Conn.Close()
if err != nil {
gplog.Info(fmt.Sprintf("Error closing hub to agent connection. host: %s, err: %s", conn.Hostname, err.Error()))
}
conn.Conn.WaitForStateChange(context.Background(), currState)
}
}
type InitializeConfig struct {
Standby greenplum.SegConfig
Master greenplum.SegConfig
Primaries []greenplum.SegConfig
Mirrors []greenplum.SegConfig
}
// Config contains all the information that will be persisted to and loaded
// from disk during calls to Save() and Load().
type Config struct {
Source *greenplum.Cluster
Target *greenplum.Cluster
// TargetInitializeConfig contains all the info needed to initialize the
// target cluster's master, standby, primaries and mirrors.
TargetInitializeConfig InitializeConfig
Port int
AgentPort int
UseLinkMode bool
UpgradeID upgrade.ID
// Tablespaces contains the tablespaces in the database, keyed by
// dbid and tablespace OID.
Tablespaces greenplum.Tablespaces
TablespacesMappingFilePath string
}
func (c *Config) Load(r io.Reader) error {
dec := json.NewDecoder(r)
return dec.Decode(c)
}
func (c *Config) Save(w io.Writer) error {
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(c)
}
// SaveConfig persists the hub's configuration to disk.
func (s *Server) SaveConfig() (err error) {
// TODO: Switch to an atomic implementation like renameio. Consider what
// happens if Config.Save() panics: we'll have truncated the file
// on disk and the hub will be unable to recover. For now, since we normally
// only save the configuration during initialize and any configuration
// errors could be fixed by reinitializing, the risk seems small.
file, err := utils.System.Create(upgrade.GetConfigFile())
if err != nil {
return err
}
defer func() {
if cerr := file.Close(); cerr != nil {
cerr = xerrors.Errorf("closing hub configuration: %w", cerr)
err = multierror.Append(err, cerr).ErrorOrNil()
}
}()
err = s.Config.Save(file)
if err != nil {
return xerrors.Errorf("saving hub configuration: %w", err)
}
return nil
}
func LoadConfig(conf *Config, path string) error {
file, err := os.Open(path)
if err != nil {
return xerrors.Errorf("opening configuration file: %w", err)
}
defer file.Close()
err = conf.Load(file)
if err != nil {
return xerrors.Errorf("reading configuration file: %w", err)
}
return nil
}
func AgentHosts(c *greenplum.Cluster) []string {
uniqueHosts := make(map[string]bool)
excludingMaster := func(seg *greenplum.SegConfig) bool {
return !seg.IsMaster()
}
for _, seg := range c.SelectSegments(excludingMaster) {
uniqueHosts[seg.Hostname] = true
}
hosts := make([]string, 0)
for host := range uniqueHosts {
hosts = append(hosts, host)
}
return hosts
}
func MakeTargetClusterMessage(target *greenplum.Cluster) *idl.Message {
data := make(map[string]string)
data[idl.ResponseKey_target_port.String()] = strconv.Itoa(target.MasterPort())
data[idl.ResponseKey_target_master_data_directory.String()] = target.MasterDataDir()
return &idl.Message{
Contents: &idl.Message_Response{
Response: &idl.Response{Data: data},
},
}
}
| {
s.mu.Lock()
defer s.mu.Unlock()
// StopServices calls Stop(false) because it has already closed the agentConns
if closeAgentConns {
s.closeAgentConns()
}
if s.server != nil {
s.server.Stop()
<-s.stopped // block until it is OK to stop
}
// Mark this server stopped so that a concurrent Start() doesn't try to
// start things up again.
s.stopped = nil
} | identifier_body |
server.go | // Copyright (c) 2017-2020 VMware, Inc. or its affiliates
// SPDX-License-Identifier: Apache-2.0
package hub
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/greenplum-db/gp-common-go-libs/gplog"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"golang.org/x/xerrors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/reflection"
grpcStatus "google.golang.org/grpc/status"
"github.com/greenplum-db/gpupgrade/greenplum"
"github.com/greenplum-db/gpupgrade/idl"
"github.com/greenplum-db/gpupgrade/upgrade"
"github.com/greenplum-db/gpupgrade/utils"
"github.com/greenplum-db/gpupgrade/utils/daemon"
"github.com/greenplum-db/gpupgrade/utils/log"
)
var DialTimeout = 3 * time.Second
// Returned from Server.Start() if Server.Stop() has already been called.
var ErrHubStopped = errors.New("hub is stopped")
type Dialer func(ctx context.Context, target string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
type Server struct {
*Config
StateDir string
agentConns []*Connection
grpcDialer Dialer
mu sync.Mutex
server *grpc.Server
lis net.Listener
// This is used both as a channel to communicate from Start() to
// Stop() to indicate to Stop() that it can finally terminate
// and also as a flag to communicate from Stop() to Start() that
// Stop() had already been called, so no need to do anything further
// in Start().
// Note that when used as a flag, nil value means that Stop() has
// been called.
stopped chan struct{}
daemon bool
}
type Connection struct {
Conn *grpc.ClientConn
AgentClient idl.AgentClient
Hostname string
CancelContext func()
}
func New(conf *Config, grpcDialer Dialer, stateDir string) *Server {
h := &Server{
Config: conf,
StateDir: stateDir,
stopped: make(chan struct{}, 1),
grpcDialer: grpcDialer,
}
return h
}
// MakeDaemon tells the Server to disconnect its stdout/stderr streams after
// successfully starting up.
func (s *Server) MakeDaemon() {
s.daemon = true
}
func (s *Server) Start() error {
lis, err := net.Listen("tcp", ":"+strconv.Itoa(s.Port))
if err != nil {
return xerrors.Errorf("listen on port %d: %w", s.Port, err)
}
// Set up an interceptor function to log any panics we get from request
// handlers.
interceptor := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
defer log.WritePanics()
return handler(ctx, req)
}
server := grpc.NewServer(grpc.UnaryInterceptor(interceptor))
s.mu.Lock()
if s.stopped == nil {
// Stop() has already been called; return without serving.
s.mu.Unlock()
return ErrHubStopped
}
s.server = server
s.lis = lis
s.mu.Unlock()
idl.RegisterCliToHubServer(server, s)
reflection.Register(server) | daemon.Daemonize()
}
err = server.Serve(lis)
if err != nil {
err = xerrors.Errorf("serve: %w", err)
}
// inform Stop() that it is OK to stop now
s.stopped <- struct{}{}
return err
}
func (s *Server) StopServices(ctx context.Context, in *idl.StopServicesRequest) (*idl.StopServicesReply, error) {
err := s.StopAgents()
if err != nil {
gplog.Debug("failed to stop agents: %#v", err)
}
s.Stop(false)
return &idl.StopServicesReply{}, nil
}
// TODO: add unit tests for this; this is currently tricky due to h.AgentConns()
// mutating global state
func (s *Server) StopAgents() error {
// FIXME: s.AgentConns() fails fast if a single agent isn't available
// we need to connect to all available agents so we can stop just those
_, err := s.AgentConns()
if err != nil {
return err
}
var wg sync.WaitGroup
errs := make(chan error, len(s.agentConns))
for _, conn := range s.agentConns {
conn := conn
wg.Add(1)
go func() {
defer wg.Done()
_, err := conn.AgentClient.StopAgent(context.Background(), &idl.StopAgentRequest{})
if err == nil { // no error means the agent did not terminate as expected
errs <- xerrors.Errorf("failed to stop agent on host: %s", conn.Hostname)
return
}
// XXX: "transport is closing" is not documented but is needed to uniquely interpret codes.Unavailable
// https://github.com/grpc/grpc/blob/v1.24.0/doc/statuscodes.md
errStatus := grpcStatus.Convert(err)
if errStatus.Code() != codes.Unavailable || errStatus.Message() != "transport is closing" {
errs <- xerrors.Errorf("failed to stop agent on host %s : %w", conn.Hostname, err)
}
}()
}
wg.Wait()
close(errs)
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return multiErr.ErrorOrNil()
}
func (s *Server) Stop(closeAgentConns bool) {
s.mu.Lock()
defer s.mu.Unlock()
// StopServices calls Stop(false) because it has already closed the agentConns
if closeAgentConns {
s.closeAgentConns()
}
if s.server != nil {
s.server.Stop()
<-s.stopped // block until it is OK to stop
}
// Mark this server stopped so that a concurrent Start() doesn't try to
// start things up again.
s.stopped = nil
}
func (s *Server) RestartAgents(ctx context.Context, in *idl.RestartAgentsRequest) (*idl.RestartAgentsReply, error) {
restartedHosts, err := RestartAgents(ctx, nil, AgentHosts(s.Source), s.AgentPort, s.StateDir)
return &idl.RestartAgentsReply{AgentHosts: restartedHosts}, err
}
func RestartAgents(ctx context.Context,
dialer func(context.Context, string) (net.Conn, error),
hostnames []string,
port int,
stateDir string) ([]string, error) {
var wg sync.WaitGroup
restartedHosts := make(chan string, len(hostnames))
errs := make(chan error, len(hostnames))
for _, host := range hostnames {
wg.Add(1)
go func(host string) {
defer wg.Done()
address := host + ":" + strconv.Itoa(port)
timeoutCtx, cancelFunc := context.WithTimeout(ctx, 3*time.Second)
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.FailOnNonTempDialError(true),
}
if dialer != nil {
opts = append(opts, grpc.WithContextDialer(dialer))
}
conn, err := grpc.DialContext(timeoutCtx, address, opts...)
cancelFunc()
if err == nil {
err = conn.Close()
if err != nil {
gplog.Error("failed to close agent connection to %s: %+v", host, err)
}
return
}
gplog.Debug("failed to dial agent on %s: %+v", host, err)
gplog.Info("starting agent on %s", host)
agentPath, err := getAgentPath()
if err != nil {
errs <- err
return
}
cmd := execCommand("ssh", host,
fmt.Sprintf("bash -c \"%s agent --daemonize --port %d --state-directory %s\"", agentPath, port, stateDir))
stdout, err := cmd.Output()
if err != nil {
errs <- err
return
}
gplog.Debug(string(stdout))
restartedHosts <- host
}(host)
}
wg.Wait()
close(errs)
close(restartedHosts)
var hosts []string
for h := range restartedHosts {
hosts = append(hosts, h)
}
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return hosts, multiErr.ErrorOrNil()
}
func (s *Server) AgentConns() ([]*Connection, error) {
// Lock the mutex to protect against races with Server.Stop().
// XXX This is a *ridiculously* broad lock. Have fun waiting for the dial
// timeout when calling Stop() and AgentConns() at the same time, for
// instance. We should not lock around a network operation, but it seems
// like the AgentConns concept is not long for this world anyway.
s.mu.Lock()
defer s.mu.Unlock()
if s.agentConns != nil {
err := EnsureConnsAreReady(s.agentConns)
if err != nil {
gplog.Error("ensureConnsAreReady failed: %s", err)
return nil, err
}
return s.agentConns, nil
}
hostnames := AgentHosts(s.Source)
for _, host := range hostnames {
ctx, cancelFunc := context.WithTimeout(context.Background(), DialTimeout)
conn, err := s.grpcDialer(ctx,
host+":"+strconv.Itoa(s.AgentPort),
grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
err = xerrors.Errorf("grpcDialer failed: %w", err)
gplog.Error(err.Error())
cancelFunc()
return nil, err
}
s.agentConns = append(s.agentConns, &Connection{
Conn: conn,
AgentClient: idl.NewAgentClient(conn),
Hostname: host,
CancelContext: cancelFunc,
})
}
return s.agentConns, nil
}
func EnsureConnsAreReady(agentConns []*Connection) error {
hostnames := []string{}
for _, conn := range agentConns {
if conn.Conn.GetState() != connectivity.Ready {
hostnames = append(hostnames, conn.Hostname)
}
}
if len(hostnames) > 0 {
return fmt.Errorf("the connections to the following hosts were not ready: %s", strings.Join(hostnames, ","))
}
return nil
}
// Closes all h.agentConns. Callers must hold the Server's mutex.
// TODO: this function assumes that all h.agentConns are _not_ in a terminal
// state (e.g. already closed). If so, conn.Conn.WaitForStateChange() can block
// indefinitely.
func (s *Server) closeAgentConns() {
for _, conn := range s.agentConns {
defer conn.CancelContext()
currState := conn.Conn.GetState()
err := conn.Conn.Close()
if err != nil {
gplog.Info(fmt.Sprintf("Error closing hub to agent connection. host: %s, err: %s", conn.Hostname, err.Error()))
}
conn.Conn.WaitForStateChange(context.Background(), currState)
}
}
type InitializeConfig struct {
Standby greenplum.SegConfig
Master greenplum.SegConfig
Primaries []greenplum.SegConfig
Mirrors []greenplum.SegConfig
}
// Config contains all the information that will be persisted to and loaded
// from disk during calls to Save() and Load().
type Config struct {
Source *greenplum.Cluster
Target *greenplum.Cluster
// TargetInitializeConfig contains all the info needed to initialize the
// target cluster's master, standby, primaries and mirrors.
TargetInitializeConfig InitializeConfig
Port int
AgentPort int
UseLinkMode bool
UpgradeID upgrade.ID
// Tablespaces contains the tablespaces in the database, keyed by
// dbid and tablespace OID.
Tablespaces greenplum.Tablespaces
TablespacesMappingFilePath string
}
func (c *Config) Load(r io.Reader) error {
dec := json.NewDecoder(r)
return dec.Decode(c)
}
func (c *Config) Save(w io.Writer) error {
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(c)
}
// SaveConfig persists the hub's configuration to disk.
func (s *Server) SaveConfig() (err error) {
// TODO: Switch to an atomic implementation like renameio. Consider what
// happens if Config.Save() panics: we'll have truncated the file
// on disk and the hub will be unable to recover. For now, since we normally
// only save the configuration during initialize and any configuration
// errors could be fixed by reinitializing, the risk seems small.
file, err := utils.System.Create(upgrade.GetConfigFile())
if err != nil {
return err
}
defer func() {
if cerr := file.Close(); cerr != nil {
cerr = xerrors.Errorf("closing hub configuration: %w", cerr)
err = multierror.Append(err, cerr).ErrorOrNil()
}
}()
err = s.Config.Save(file)
if err != nil {
return xerrors.Errorf("saving hub configuration: %w", err)
}
return nil
}
func LoadConfig(conf *Config, path string) error {
file, err := os.Open(path)
if err != nil {
return xerrors.Errorf("opening configuration file: %w", err)
}
defer file.Close()
err = conf.Load(file)
if err != nil {
return xerrors.Errorf("reading configuration file: %w", err)
}
return nil
}
func AgentHosts(c *greenplum.Cluster) []string {
uniqueHosts := make(map[string]bool)
excludingMaster := func(seg *greenplum.SegConfig) bool {
return !seg.IsMaster()
}
for _, seg := range c.SelectSegments(excludingMaster) {
uniqueHosts[seg.Hostname] = true
}
hosts := make([]string, 0)
for host := range uniqueHosts {
hosts = append(hosts, host)
}
return hosts
}
func MakeTargetClusterMessage(target *greenplum.Cluster) *idl.Message {
data := make(map[string]string)
data[idl.ResponseKey_target_port.String()] = strconv.Itoa(target.MasterPort())
data[idl.ResponseKey_target_master_data_directory.String()] = target.MasterDataDir()
return &idl.Message{
Contents: &idl.Message_Response{
Response: &idl.Response{Data: data},
},
}
} |
if s.daemon {
fmt.Printf("Hub started on port %d (pid %d)\n", s.Port, os.Getpid()) | random_line_split |
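The record ending here is labeled random_line_split: its prefix stops right after reflection.Register(server), the masked middle is a short run of lines, and the suffix resumes at daemon.Daemonize(). A plausible way to produce that kind of split is to cut a source file at two random line boundaries, as in the hedged Go sketch below; the dataset's actual extraction pipeline is not documented here, so treat this purely as an illustration.

```go
package main

import (
	"fmt"
	"math/rand"
	"strings"
)

// randomLineSplit cuts source at two random line boundaries and returns the
// resulting (prefix, middle, suffix). This mimics what a "random_line_split"
// record looks like; it is a guess at the construction, not the dataset's
// documented tooling.
func randomLineSplit(source string, rng *rand.Rand) (prefix, middle, suffix string) {
	lines := strings.SplitAfter(source, "\n")
	if len(lines) < 3 {
		return source, "", ""
	}
	start := rng.Intn(len(lines) - 1)               // index of the first masked line
	end := start + 1 + rng.Intn(len(lines)-start-1) // one past the last masked line
	prefix = strings.Join(lines[:start], "")
	middle = strings.Join(lines[start:end], "")
	suffix = strings.Join(lines[end:], "")
	return prefix, middle, suffix
}

func main() {
	src := "reflection.Register(server)\nif s.daemon {\n\tdaemon.Daemonize()\n}\n"
	p, m, s := randomLineSplit(src, rand.New(rand.NewSource(1)))
	fmt.Printf("prefix=%q\nmiddle=%q\nsuffix=%q\n", p, m, s)
}
```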
console_test.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container
import (
"bytes"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/kr/pty"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/pkg/unet"
)
// socketPath creates a path inside bundleDir and ensures that the returned
// path is under 108 characters (the unix socket path length limit),
// relativizing the path if necessary.
func socketPath(bundleDir string) (string, error) {
num := rand.Intn(10000)
path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
const maxPathLen = 108
if len(path) <= maxPathLen {
return path, nil
}
// Path is too large, try to make it smaller.
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("error getting cwd: %v", err)
}
path, err = filepath.Rel(cwd, path)
if err != nil {
return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
}
if len(path) > maxPathLen {
return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
}
return path, nil
}
// createConsoleSocket creates a socket at the given path that will receive a
// console fd from the sandbox. If an error occurs, t.Fatalf will be called.
// The function returning should be deferred as cleanup.
func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) {
t.Helper()
srv, err := unet.BindAndListen(path, false)
if err != nil {
t.Fatalf("error binding and listening to socket %q: %v", path, err)
}
cleanup := func() {
// Log errors; nothing can be done.
if err := srv.Close(); err != nil {
t.Logf("error closing socket %q: %v", path, err)
}
if err := os.Remove(path); err != nil {
t.Logf("error removing socket %q: %v", path, err)
}
}
return srv, cleanup
}
// receiveConsolePTY accepts a connection on the server socket and reads fds.
// It fails if more than one FD is received, or if the FD is not a PTY. It
// returns the PTY master file.
func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
sock, err := srv.Accept()
if err != nil {
return nil, fmt.Errorf("error accepting socket connection: %v", err)
}
// Allow 3 fds to be received. We only expect 1.
r := sock.Reader(true /* blocking */)
r.EnableFDs(1)
// The socket is closed right after sending the FD, so EOF is
// an allowed error.
b := [][]byte{{}}
if _, err := r.ReadVec(b); err != nil && err != io.EOF {
return nil, fmt.Errorf("error reading from socket connection: %v", err)
}
// We should have gotten a control message.
fds, err := r.ExtractFDs()
if err != nil {
return nil, fmt.Errorf("error extracting fds from socket connection: %v", err)
}
if len(fds) != 1 {
return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds))
}
// Verify that the fd is a terminal.
if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
return nil, fmt.Errorf("fd is not a terminal (ioctl TCGETS got %v)", err)
}
return os.NewFile(uintptr(fds[0]), "pty_master"), nil
}
// Test that a pty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) |
// Test that a pty FD is sent over the console socket if one is provided.
func TestMultiContainerConsoleSocket(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Setup the containers.
sleep := []string{"sleep", "100"}
tru := []string{"true"}
testSpecs, ids := createSpecs(sleep, tru)
testSpecs[1].Process.Terminal = true
bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: ids[0],
Spec: testSpecs[0],
BundleDir: bundleDir,
}
rootCont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer rootCont.Destroy()
if err := rootCont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[0])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args = Args{
ID: ids[1],
Spec: testSpecs[1],
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
ptyMaster.Close()
})
}
}
// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Create a pty master/replica. The replica will be passed to the exec
// process.
ptyMaster, ptyReplica, err := pty.Open()
if err != nil {
t.Fatalf("error opening pty: %v", err)
}
defer ptyMaster.Close()
defer ptyReplica.Close()
// Exec bash and attach a terminal. Note that occasionally /bin/sh
// may be a different shell or have a different configuration (such
// as disabling interactive mode and job control). Since we want to
// explicitly test interactive mode, use /bin/bash. See b/116981926.
execArgs := &control.ExecArgs{
Filename: "/bin/bash",
// Don't let bash execute from profile or rc files, otherwise
// our PID counts get messed up.
Argv: []string{"/bin/bash", "--noprofile", "--norc"},
// Pass the pty replica as FD 0, 1, and 2.
FilePayload: control.NewFilePayload(map[int]*os.File{
0: ptyReplica, 1: ptyReplica, 2: ptyReplica,
}, nil),
StdioIsPty: true,
}
pid, err := c.Execute(conf, execArgs)
if err != nil {
t.Fatalf("error executing: %v", err)
}
if pid != 2 {
t.Fatalf("exec got pid %d, wanted %d", pid, 2)
}
// Make sure all the processes are running.
expectedPL := []*control.Process{
// Root container process.
newProcessBuilder().Cmd("sleep").Process(),
// Bash from exec process.
newProcessBuilder().PID(2).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Execute sleep.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write: %v", err)
}
// Wait for it to start. Sleep's PPID is bash's PID.
expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Send a SIGTERM to the foreground process for the exec PID. Note that
// although we pass in the PID of "bash", it should actually terminate
// "sleep", since that is the foreground process.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
expectedPL = expectedPL[:1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Make sure the process indicates it was killed by a SIGKILL.
ws, err := c.WaitPID(pid)
if err != nil {
t.Errorf("waiting on container failed: %v", err)
}
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that job control signals work on a console created with "run -ti".
func TestJobControlSignalRootContainer(t *testing.T) {
conf := testutil.TestConfig(t)
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Get the PTY master.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(ptyMaster, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Start the container.
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Start waiting for the container to exit in a goroutine. We do this
// very early, otherwise it might exit before we have a chance to call
// Wait.
var (
ws unix.WaitStatus
wg sync.WaitGroup
)
wg.Add(1)
go func() {
var err error
ws, err = c.Wait()
if err != nil {
t.Errorf("error waiting on container: %v", err)
}
wg.Done()
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().PID(1).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute sleep via the terminal.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write(): %v", err)
}
// Wait for sleep to start.
expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Reset the pty buffer, so there is less output for us to scan later.
ptyBuf.Reset()
// Send a SIGTERM to the foreground process. We pass PID=0, indicating
// that the root process should be killed. However, by setting
// fgProcess=true, the signal should actually be sent to sleep.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Wait for the sandbox to exit. It should exit with a SIGKILL status.
wg.Wait()
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that terminal works with root and sub-containers.
func TestMultiContainerTerminal(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
bash := []string{"/bin/bash", "--noprofile", "--norc"}
testSpecs, ids := createSpecs(bash, bash)
type termContainer struct {
container *Container
master *os.File
}
var containers []termContainer
for i, spec := range testSpecs {
bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
spec.Process.Terminal = true
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: ids[i],
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
containers = append(containers, termContainer{
container: cont,
master: ptyMaster,
})
}
for _, tc := range containers {
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(tc.master, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().Cmd("bash").Process(),
}
if err := waitForProcessList(tc.container, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute echo command and check that it was executed correctly. Use
// a variable to ensure it's not matching against command echo.
if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil {
t.Fatalf("master.Write(): %v", err)
}
if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
t.Fatalf("echo didn't execute: %v", err)
}
}
})
}
}
// blockingBuffer is a thread-safe buffer that blocks when reading if the
// buffer is empty. It implements io.ReadWriter.
type blockingBuffer struct {
// A send to readCh indicates that a previously empty buffer now has
// data for reading.
readCh chan struct{}
// mu protects buf.
mu sync.Mutex
buf bytes.Buffer
}
func newBlockingBuffer() *blockingBuffer {
return &blockingBuffer{
readCh: make(chan struct{}, 1),
}
}
// Write implements Writer.Write.
func (bb *blockingBuffer) Write(p []byte) (int, error) {
bb.mu.Lock()
defer bb.mu.Unlock()
l := bb.buf.Len()
n, err := bb.buf.Write(p)
if l == 0 && n > 0 {
// New data!
bb.readCh <- struct{}{}
}
return n, err
}
// Read implements Reader.Read. It will block until data is available.
func (bb *blockingBuffer) Read(p []byte) (int, error) {
for {
bb.mu.Lock()
n, err := bb.buf.Read(p)
if n > 0 || err != io.EOF {
if bb.buf.Len() == 0 {
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
}
bb.mu.Unlock()
return n, err
}
bb.mu.Unlock()
// Wait for new data.
<-bb.readCh
}
}
// Reset resets the buffer.
func (bb *blockingBuffer) Reset() {
bb.mu.Lock()
defer bb.mu.Unlock()
bb.buf.Reset()
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
}
| {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
ptyMaster.Close()
})
}
} | identifier_body |
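The two identifier_body rows mask an entire function body, brace to brace (Server.Stop above and TestConsoleSocket here). One way such a record could be derived from Go source is to parse the file and slice it at the body's brace offsets, as in the sketch below. The helper name and the overall approach are assumptions for illustration; the dataset does not state how its records were produced.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// splitAtFuncBody masks the body of the named function in src and returns
// (prefix, middle, suffix), where middle is the body including its braces.
// A sketch of how an "identifier_body" record might be derived.
func splitAtFuncBody(src, funcName string) (prefix, middle, suffix string, err error) {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "src.go", src, 0)
	if err != nil {
		return "", "", "", err
	}
	for _, decl := range file.Decls {
		fn, ok := decl.(*ast.FuncDecl)
		if !ok || fn.Name.Name != funcName || fn.Body == nil {
			continue
		}
		start := fset.Position(fn.Body.Lbrace).Offset
		end := fset.Position(fn.Body.Rbrace).Offset + 1
		return src[:start], src[start:end], src[end:], nil
	}
	return "", "", "", fmt.Errorf("function %q not found", funcName)
}

func main() {
	src := "package hub\n\nfunc (s *Server) MakeDaemon() {\n\ts.daemon = true\n}\n"
	p, m, s, err := splitAtFuncBody(src, "MakeDaemon")
	if err != nil {
		panic(err)
	}
	fmt.Printf("prefix=%q\nmiddle=%q\nsuffix=%q\n", p, m, s)
}
```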
console_test.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container
import (
"bytes"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/kr/pty"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/pkg/unet"
)
// socketPath creates a path inside bundleDir and ensures that the returned
// path is under 108 characters (the unix socket path length limit),
// relativizing the path if necessary.
func socketPath(bundleDir string) (string, error) {
num := rand.Intn(10000)
path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
const maxPathLen = 108
if len(path) <= maxPathLen {
return path, nil
}
// Path is too large, try to make it smaller.
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("error getting cwd: %v", err)
}
path, err = filepath.Rel(cwd, path)
if err != nil {
return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
}
if len(path) > maxPathLen {
return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
}
return path, nil
}
// createConsoleSocket creates a socket at the given path that will receive a
// console fd from the sandbox. If an error occurs, t.Fatalf will be called.
// The function returning should be deferred as cleanup.
func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) {
t.Helper()
srv, err := unet.BindAndListen(path, false)
if err != nil {
t.Fatalf("error binding and listening to socket %q: %v", path, err)
}
cleanup := func() {
// Log errors; nothing can be done.
if err := srv.Close(); err != nil {
t.Logf("error closing socket %q: %v", path, err)
}
if err := os.Remove(path); err != nil {
t.Logf("error removing socket %q: %v", path, err)
}
}
return srv, cleanup
}
// receiveConsolePTY accepts a connection on the server socket and reads fds.
// It fails if more than one FD is received, or if the FD is not a PTY. It
// returns the PTY master file.
func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
sock, err := srv.Accept()
if err != nil {
return nil, fmt.Errorf("error accepting socket connection: %v", err)
}
// Allow 3 fds to be received. We only expect 1.
r := sock.Reader(true /* blocking */)
r.EnableFDs(1)
// The socket is closed right after sending the FD, so EOF is
// an allowed error.
b := [][]byte{{}}
if _, err := r.ReadVec(b); err != nil && err != io.EOF {
return nil, fmt.Errorf("error reading from socket connection: %v", err)
}
// We should have gotten a control message.
fds, err := r.ExtractFDs()
if err != nil {
return nil, fmt.Errorf("error extracting fds from socket connection: %v", err)
}
if len(fds) != 1 {
return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds))
}
// Verify that the fd is a terminal.
if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
return nil, fmt.Errorf("fd is not a terminal (ioctl TGGETS got %v)", err)
}
return os.NewFile(uintptr(fds[0]), "pty_master"), nil
}
// Test that a pty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
ptyMaster.Close()
})
}
}
// Test that a pty FD is sent over the console socket if one is provided.
func TestMultiContainerConsoleSocket(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Setup the containers.
sleep := []string{"sleep", "100"}
tru := []string{"true"}
testSpecs, ids := createSpecs(sleep, tru)
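// Only the second container (the one running "true") is given a terminal,
// so only it should receive a console PTY below.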
testSpecs[1].Process.Terminal = true
bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: ids[0],
Spec: testSpecs[0],
BundleDir: bundleDir,
}
rootCont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer rootCont.Destroy()
if err := rootCont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[1])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
| t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args = Args{
ID: ids[1],
Spec: testSpecs[1],
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
ptyMaster.Close()
})
}
}
// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Create a pty master/replica. The replica will be passed to the exec
// process.
ptyMaster, ptyReplica, err := pty.Open()
if err != nil {
t.Fatalf("error opening pty: %v", err)
}
defer ptyMaster.Close()
defer ptyReplica.Close()
// Exec bash and attach a terminal. Note that occasionally /bin/sh
// may be a different shell or have a different configuration (such
// as disabling interactive mode and job control). Since we want to
// explicitly test interactive mode, use /bin/bash. See b/116981926.
execArgs := &control.ExecArgs{
Filename: "/bin/bash",
// Don't let bash execute from profile or rc files, otherwise
// our PID counts get messed up.
Argv: []string{"/bin/bash", "--noprofile", "--norc"},
// Pass the pty replica as FD 0, 1, and 2.
FilePayload: control.NewFilePayload(map[int]*os.File{
0: ptyReplica, 1: ptyReplica, 2: ptyReplica,
}, nil),
StdioIsPty: true,
}
pid, err := c.Execute(conf, execArgs)
if err != nil {
t.Fatalf("error executing: %v", err)
}
if pid != 2 {
t.Fatalf("exec got pid %d, wanted %d", pid, 2)
}
// Make sure all the processes are running.
expectedPL := []*control.Process{
// Root container process.
newProcessBuilder().Cmd("sleep").Process(),
// Bash from exec process.
newProcessBuilder().PID(2).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Execute sleep.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write: %v", err)
}
// Wait for it to start. Sleep's PPID is bash's PID.
expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Send a SIGTERM to the foreground process for the exec PID. Note that
// although we pass in the PID of "bash", it should actually terminate
// "sleep", since that is the foreground process.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
expectedPL = expectedPL[:1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Make sure the process indicates it was killed by a SIGKILL.
ws, err := c.WaitPID(pid)
if err != nil {
t.Errorf("waiting on container failed: %v", err)
}
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that job control signals work on a console created with "run -ti".
func TestJobControlSignalRootContainer(t *testing.T) {
conf := testutil.TestConfig(t)
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Get the PTY master.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(ptyMaster, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Start the container.
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Start waiting for the container to exit in a goroutine. We do this
// very early, otherwise it might exit before we have a chance to call
// Wait.
var (
ws unix.WaitStatus
wg sync.WaitGroup
)
wg.Add(1)
go func() {
var err error
ws, err = c.Wait()
if err != nil {
t.Errorf("error waiting on container: %v", err)
}
wg.Done()
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().PID(1).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute sleep via the terminal.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write(): %v", err)
}
// Wait for sleep to start.
expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Reset the pty buffer, so there is less output for us to scan later.
ptyBuf.Reset()
// Send a SIGTERM to the foreground process. We pass PID=0, indicating
// that the root process should be killed. However, by setting
// fgProcess=true, the signal should actually be sent to sleep.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Wait for the sandbox to exit. It should exit with a SIGKILL status.
wg.Wait()
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that terminal works with root and sub-containers.
func TestMultiContainerTerminal(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
bash := []string{"/bin/bash", "--noprofile", "--norc"}
testSpecs, ids := createSpecs(bash, bash)
type termContainer struct {
container *Container
master *os.File
}
var containers []termContainer
for i, spec := range testSpecs {
bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
spec.Process.Terminal = true
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: ids[i],
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
containers = append(containers, termContainer{
container: cont,
master: ptyMaster,
})
}
for _, tc := range containers {
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(tc.master, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().Cmd("bash").Process(),
}
if err := waitForProcessList(tc.container, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute echo command and check that it was executed correctly. Use
// a variable so the check does not match the echo of the command itself.
if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil {
t.Fatalf("master.Write(): %v", err)
}
if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
t.Fatalf("echo didn't execute: %v", err)
}
}
})
}
}
// blockingBuffer is a thread-safe buffer that blocks when reading if the
// buffer is empty. It implements io.ReadWriter.
type blockingBuffer struct {
// A send to readCh indicates that a previously empty buffer now has
// data for reading.
readCh chan struct{}
// mu protects buf.
mu sync.Mutex
buf bytes.Buffer
}
func newBlockingBuffer() *blockingBuffer {
return &blockingBuffer{
readCh: make(chan struct{}, 1),
}
}
// Write implements Writer.Write.
func (bb *blockingBuffer) Write(p []byte) (int, error) {
bb.mu.Lock()
defer bb.mu.Unlock()
l := bb.buf.Len()
n, err := bb.buf.Write(p)
if l == 0 && n > 0 {
// New data!
bb.readCh <- struct{}{}
}
return n, err
}
// Read implements Reader.Read. It will block until data is available.
func (bb *blockingBuffer) Read(p []byte) (int, error) {
for {
bb.mu.Lock()
n, err := bb.buf.Read(p)
if n > 0 || err != io.EOF {
if bb.buf.Len() == 0 {
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
}
bb.mu.Unlock()
return n, err
}
bb.mu.Unlock()
// Wait for new data.
<-bb.readCh
}
}
// Reset resets the buffer.
func (bb *blockingBuffer) Reset() {
bb.mu.Lock()
defer bb.mu.Unlock()
bb.buf.Reset()
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
} | sock, err := socketPath(bundleDir)
if err != nil { | random_line_split |
console_test.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container
import (
"bytes"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/kr/pty"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/pkg/unet"
)
// socketPath creates a path inside bundleDir and ensures that the returned
// path is under 108 characters (the Unix socket path length limit),
// relativizing the path if necessary.
func socketPath(bundleDir string) (string, error) {
num := rand.Intn(10000)
path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
const maxPathLen = 108
if len(path) <= maxPathLen {
return path, nil
}
// Path is too large, try to make it smaller.
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("error getting cwd: %v", err)
}
path, err = filepath.Rel(cwd, path)
if err != nil {
return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
}
if len(path) > maxPathLen {
return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
}
return path, nil
}
// createConsoleSocket creates a socket at the given path that will receive a
// console fd from the sandbox. If an error occurs, t.Fatalf will be called.
// The returned function should be deferred to perform cleanup.
func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) {
t.Helper()
srv, err := unet.BindAndListen(path, false)
if err != nil {
t.Fatalf("error binding and listening to socket %q: %v", path, err)
}
cleanup := func() {
// Log errors; nothing can be done.
if err := srv.Close(); err != nil {
t.Logf("error closing socket %q: %v", path, err)
}
if err := os.Remove(path); err != nil {
t.Logf("error removing socket %q: %v", path, err)
}
}
return srv, cleanup
}
// receiveConsolePTY accepts a connection on the server socket and reads fds.
// It fails if more than one FD is received, or if the FD is not a PTY. It
// returns the PTY master file.
func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
sock, err := srv.Accept()
if err != nil {
return nil, fmt.Errorf("error accepting socket connection: %v", err)
}
// Allow 1 FD to be received; we only expect one.
r := sock.Reader(true /* blocking */)
r.EnableFDs(1)
// The socket is closed right after sending the FD, so EOF is
// an allowed error.
b := [][]byte{{}}
if _, err := r.ReadVec(b); err != nil && err != io.EOF {
return nil, fmt.Errorf("error reading from socket connection: %v", err)
}
// We should have gotten a control message.
fds, err := r.ExtractFDs()
if err != nil {
return nil, fmt.Errorf("error extracting fds from socket connection: %v", err)
}
if len(fds) != 1 {
return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds))
}
// Verify that the fd is a terminal.
if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
return nil, fmt.Errorf("fd is not a terminal (ioctl TGGETS got %v)", err)
}
return os.NewFile(uintptr(fds[0]), "pty_master"), nil
}
// Test that a pty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
ptyMaster.Close()
})
}
}
// Test that a pty FD is sent over the console socket if one is provided.
func TestMultiContainerConsoleSocket(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Setup the containers.
sleep := []string{"sleep", "100"}
tru := []string{"true"}
testSpecs, ids := createSpecs(sleep, tru)
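// Only the second container (the one running "true") is given a terminal,
// so only it should receive a console PTY below.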
testSpecs[1].Process.Terminal = true
bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: ids[0],
Spec: testSpecs[0],
BundleDir: bundleDir,
}
rootCont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer rootCont.Destroy()
if err := rootCont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[1])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args = Args{
ID: ids[1],
Spec: testSpecs[1],
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil |
ptyMaster.Close()
})
}
}
// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Create a pty master/replica. The replica will be passed to the exec
// process.
ptyMaster, ptyReplica, err := pty.Open()
if err != nil {
t.Fatalf("error opening pty: %v", err)
}
defer ptyMaster.Close()
defer ptyReplica.Close()
// Exec bash and attach a terminal. Note that occasionally /bin/sh
// may be a different shell or have a different configuration (such
// as disabling interactive mode and job control). Since we want to
// explicitly test interactive mode, use /bin/bash. See b/116981926.
execArgs := &control.ExecArgs{
Filename: "/bin/bash",
// Don't let bash execute from profile or rc files, otherwise
// our PID counts get messed up.
Argv: []string{"/bin/bash", "--noprofile", "--norc"},
// Pass the pty replica as FD 0, 1, and 2.
FilePayload: control.NewFilePayload(map[int]*os.File{
0: ptyReplica, 1: ptyReplica, 2: ptyReplica,
}, nil),
StdioIsPty: true,
}
pid, err := c.Execute(conf, execArgs)
if err != nil {
t.Fatalf("error executing: %v", err)
}
if pid != 2 {
t.Fatalf("exec got pid %d, wanted %d", pid, 2)
}
// Make sure all the processes are running.
expectedPL := []*control.Process{
// Root container process.
newProcessBuilder().Cmd("sleep").Process(),
// Bash from exec process.
newProcessBuilder().PID(2).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Execute sleep.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write: %v", err)
}
// Wait for it to start. Sleep's PPID is bash's PID.
expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Send a SIGTERM to the foreground process for the exec PID. Note that
// although we pass in the PID of "bash", it should actually terminate
// "sleep", since that is the foreground process.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
expectedPL = expectedPL[:1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Make sure the process indicates it was killed by a SIGKILL.
ws, err := c.WaitPID(pid)
if err != nil {
t.Errorf("waiting on container failed: %v", err)
}
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that job control signals work on a console created with "run -ti".
func TestJobControlSignalRootContainer(t *testing.T) {
conf := testutil.TestConfig(t)
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Get the PTY master.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(ptyMaster, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Start the container.
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Start waiting for the container to exit in a goroutine. We do this
// very early, otherwise it might exit before we have a chance to call
// Wait.
var (
ws unix.WaitStatus
wg sync.WaitGroup
)
wg.Add(1)
go func() {
var err error
ws, err = c.Wait()
if err != nil {
t.Errorf("error waiting on container: %v", err)
}
wg.Done()
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().PID(1).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute sleep via the terminal.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write(): %v", err)
}
// Wait for sleep to start.
expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Reset the pty buffer, so there is less output for us to scan later.
ptyBuf.Reset()
// Send a SIGTERM to the foreground process. We pass PID=0, indicating
// that the root process should be killed. However, by setting
// fgProcess=true, the signal should actually be sent to sleep.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Wait for the sandbox to exit. It should exit with a SIGKILL status.
wg.Wait()
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that terminal works with root and sub-containers.
func TestMultiContainerTerminal(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
bash := []string{"/bin/bash", "--noprofile", "--norc"}
testSpecs, ids := createSpecs(bash, bash)
type termContainer struct {
container *Container
master *os.File
}
var containers []termContainer
for i, spec := range testSpecs {
bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
spec.Process.Terminal = true
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: ids[i],
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
containers = append(containers, termContainer{
container: cont,
master: ptyMaster,
})
}
for _, tc := range containers {
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(tc.master, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().Cmd("bash").Process(),
}
if err := waitForProcessList(tc.container, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute echo command and check that it was executed correctly. Use
// a variable so the check does not match the echo of the command itself.
if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil {
t.Fatalf("master.Write(): %v", err)
}
if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
t.Fatalf("echo didn't execute: %v", err)
}
}
})
}
}
// blockingBuffer is a thread-safe buffer that blocks when reading if the
// buffer is empty. It implements io.ReadWriter.
type blockingBuffer struct {
// A send to readCh indicates that a previously empty buffer now has
// data for reading.
readCh chan struct{}
// mu protects buf.
mu sync.Mutex
buf bytes.Buffer
}
func newBlockingBuffer() *blockingBuffer {
return &blockingBuffer{
readCh: make(chan struct{}, 1),
}
}
// Write implements Writer.Write.
func (bb *blockingBuffer) Write(p []byte) (int, error) {
bb.mu.Lock()
defer bb.mu.Unlock()
l := bb.buf.Len()
n, err := bb.buf.Write(p)
if l == 0 && n > 0 {
// New data!
bb.readCh <- struct{}{}
}
return n, err
}
// Read implements Reader.Read. It will block until data is available.
func (bb *blockingBuffer) Read(p []byte) (int, error) {
for {
bb.mu.Lock()
n, err := bb.buf.Read(p)
if n > 0 || err != io.EOF {
if bb.buf.Len() == 0 {
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
}
bb.mu.Unlock()
return n, err
}
bb.mu.Unlock()
// Wait for new data.
<-bb.readCh
}
}
// Reset resets the buffer.
func (bb *blockingBuffer) Reset() {
bb.mu.Lock()
defer bb.mu.Unlock()
bb.buf.Reset()
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
}
| {
t.Fatalf("error receiving console FD: %v", err)
} | conditional_block |
console_test.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package container
import (
"bytes"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/kr/pty"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/pkg/unet"
)
// socketPath creates a path inside bundleDir and ensures that the returned
// path is under 108 characters (the Unix socket path length limit),
// relativizing the path if necessary.
func socketPath(bundleDir string) (string, error) {
num := rand.Intn(10000)
path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
const maxPathLen = 108
if len(path) <= maxPathLen {
return path, nil
}
// Path is too large, try to make it smaller.
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("error getting cwd: %v", err)
}
path, err = filepath.Rel(cwd, path)
if err != nil {
return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
}
if len(path) > maxPathLen {
return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
}
return path, nil
}
// createConsoleSocket creates a socket at the given path that will receive a
// console fd from the sandbox. If an error occurs, t.Fatalf will be called.
// The returned function should be deferred to perform cleanup.
func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) {
t.Helper()
srv, err := unet.BindAndListen(path, false)
if err != nil {
t.Fatalf("error binding and listening to socket %q: %v", path, err)
}
cleanup := func() {
// Log errors; nothing can be done.
if err := srv.Close(); err != nil {
t.Logf("error closing socket %q: %v", path, err)
}
if err := os.Remove(path); err != nil {
t.Logf("error removing socket %q: %v", path, err)
}
}
return srv, cleanup
}
// receiveConsolePTY accepts a connection on the server socket and reads fds.
// It fails if more than one FD is received, or if the FD is not a PTY. It
// returns the PTY master file.
func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
sock, err := srv.Accept()
if err != nil {
return nil, fmt.Errorf("error accepting socket connection: %v", err)
}
// Allow 1 FD to be received; we only expect one.
r := sock.Reader(true /* blocking */)
r.EnableFDs(1)
// The socket is closed right after sending the FD, so EOF is
// an allowed error.
b := [][]byte{{}}
if _, err := r.ReadVec(b); err != nil && err != io.EOF {
return nil, fmt.Errorf("error reading from socket connection: %v", err)
}
// We should have gotten a control message.
fds, err := r.ExtractFDs()
if err != nil {
return nil, fmt.Errorf("error extracting fds from socket connection: %v", err)
}
if len(fds) != 1 {
return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds))
}
// Verify that the fd is a terminal.
if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil {
return nil, fmt.Errorf("fd is not a terminal (ioctl TGGETS got %v)", err)
}
return os.NewFile(uintptr(fds[0]), "pty_master"), nil
}
// Test that a pty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
ptyMaster.Close()
})
}
}
// Test that a pty FD is sent over the console socket if one is provided.
func TestMultiContainerConsoleSocket(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Setup the containers.
sleep := []string{"sleep", "100"}
tru := []string{"true"}
testSpecs, ids := createSpecs(sleep, tru)
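// Only the second container (the one running "true") is given a terminal,
// so only it should receive a console PTY below.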
testSpecs[1].Process.Terminal = true
bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
args := Args{
ID: ids[0],
Spec: testSpecs[0],
BundleDir: bundleDir,
}
rootCont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer rootCont.Destroy()
if err := rootCont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[1])
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args = Args{
ID: ids[1],
Spec: testSpecs[1],
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
ptyMaster.Close()
})
}
}
// Test that job control signals work on a console created with "exec -ti".
func | (t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
// Create and start the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Create a pty master/replica. The replica will be passed to the exec
// process.
ptyMaster, ptyReplica, err := pty.Open()
if err != nil {
t.Fatalf("error opening pty: %v", err)
}
defer ptyMaster.Close()
defer ptyReplica.Close()
// Exec bash and attach a terminal. Note that occasionally /bin/sh
// may be a different shell or have a different configuration (such
// as disabling interactive mode and job control). Since we want to
// explicitly test interactive mode, use /bin/bash. See b/116981926.
execArgs := &control.ExecArgs{
Filename: "/bin/bash",
// Don't let bash execute from profile or rc files, otherwise
// our PID counts get messed up.
Argv: []string{"/bin/bash", "--noprofile", "--norc"},
// Pass the pty replica as FD 0, 1, and 2.
FilePayload: control.NewFilePayload(map[int]*os.File{
0: ptyReplica, 1: ptyReplica, 2: ptyReplica,
}, nil),
StdioIsPty: true,
}
pid, err := c.Execute(conf, execArgs)
if err != nil {
t.Fatalf("error executing: %v", err)
}
if pid != 2 {
t.Fatalf("exec got pid %d, wanted %d", pid, 2)
}
// Make sure all the processes are running.
expectedPL := []*control.Process{
// Root container process.
newProcessBuilder().Cmd("sleep").Process(),
// Bash from exec process.
newProcessBuilder().PID(2).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Execute sleep.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write: %v", err)
}
// Wait for it to start. Sleep's PPID is bash's PID.
expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Send a SIGTERM to the foreground process for the exec PID. Note that
// although we pass in the PID of "bash", it should actually terminate
// "sleep", since that is the foreground process.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
expectedPL = expectedPL[:1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Make sure the process indicates it was killed by a SIGKILL.
ws, err := c.WaitPID(pid)
if err != nil {
t.Errorf("waiting on container failed: %v", err)
}
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that job control signals work on a console created with "run -ti".
func TestJobControlSignalRootContainer(t *testing.T) {
conf := testutil.TestConfig(t)
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc")
spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
c, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer c.Destroy()
// Get the PTY master.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(ptyMaster, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Start the container.
if err := c.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Start waiting for the container to exit in a goroutine. We do this
// very early, otherwise it might exit before we have a chance to call
// Wait.
var (
ws unix.WaitStatus
wg sync.WaitGroup
)
wg.Add(1)
go func() {
var err error
ws, err = c.Wait()
if err != nil {
t.Errorf("error waiting on container: %v", err)
}
wg.Done()
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().PID(1).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute sleep via the terminal.
if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
t.Fatalf("ptyMaster.Write(): %v", err)
}
// Wait for sleep to start.
expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Reset the pty buffer, so there is less output for us to scan later.
ptyBuf.Reset()
// Send a SIGTERM to the foreground process. We pass PID=0, indicating
// that the root process should be killed. However, by setting
// fgProcess=true, the signal should actually be sent to sleep.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Sleep process should be gone.
expectedPL = expectedPL[:len(expectedPL)-1]
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
// Wait for the sandbox to exit. It should exit with a SIGKILL status.
wg.Wait()
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
// Test that terminal works with root and sub-containers.
func TestMultiContainerTerminal(t *testing.T) {
for name, conf := range configs(t, false /* noOverlay */) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
t.Fatalf("error creating root dir: %v", err)
}
defer cleanup()
conf.RootDir = rootDir
// Don't let bash execute from profile or rc files, otherwise our PID
// counts get messed up.
bash := []string{"/bin/bash", "--noprofile", "--norc"}
testSpecs, ids := createSpecs(bash, bash)
type termContainer struct {
container *Container
master *os.File
}
var containers []termContainer
for i, spec := range testSpecs {
bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
spec.Process.Terminal = true
sock, err := socketPath(bundleDir)
if err != nil {
t.Fatalf("error getting socket path: %v", err)
}
srv, cleanup := createConsoleSocket(t, sock)
defer cleanup()
// Create the container and pass the socket name.
args := Args{
ID: ids[i],
Spec: spec,
BundleDir: bundleDir,
ConsoleSocket: sock,
}
cont, err := New(conf, args)
if err != nil {
t.Fatalf("error creating container: %v", err)
}
defer cont.Destroy()
if err := cont.Start(conf); err != nil {
t.Fatalf("error starting container: %v", err)
}
// Make sure we get a console PTY.
ptyMaster, err := receiveConsolePTY(srv)
if err != nil {
t.Fatalf("error receiving console FD: %v", err)
}
defer ptyMaster.Close()
containers = append(containers, termContainer{
container: cont,
master: ptyMaster,
})
}
for _, tc := range containers {
// Bash output as well as sandbox output will be written to the PTY
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
// We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(tc.master, ptyBuf)
go func() {
_, _ = io.Copy(os.Stderr, tee)
}()
// Wait for bash to start.
expectedPL := []*control.Process{
newProcessBuilder().Cmd("bash").Process(),
}
if err := waitForProcessList(tc.container, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
// Execute echo command and check that it was executed correctly. Use
// a variable so the check does not match the echo of the command itself.
if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil {
t.Fatalf("master.Write(): %v", err)
}
if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
t.Fatalf("echo didn't execute: %v", err)
}
}
})
}
}
// blockingBuffer is a thread-safe buffer that blocks when reading if the
// buffer is empty. It implements io.ReadWriter.
type blockingBuffer struct {
// A send to readCh indicates that a previously empty buffer now has
// data for reading.
readCh chan struct{}
// mu protects buf.
mu sync.Mutex
buf bytes.Buffer
}
func newBlockingBuffer() *blockingBuffer {
return &blockingBuffer{
readCh: make(chan struct{}, 1),
}
}
// Write implements Writer.Write.
func (bb *blockingBuffer) Write(p []byte) (int, error) {
bb.mu.Lock()
defer bb.mu.Unlock()
l := bb.buf.Len()
n, err := bb.buf.Write(p)
if l == 0 && n > 0 {
// New data!
bb.readCh <- struct{}{}
}
return n, err
}
// Read implements Reader.Read. It will block until data is available.
func (bb *blockingBuffer) Read(p []byte) (int, error) {
for {
bb.mu.Lock()
n, err := bb.buf.Read(p)
if n > 0 || err != io.EOF {
if bb.buf.Len() == 0 {
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
}
bb.mu.Unlock()
return n, err
}
bb.mu.Unlock()
// Wait for new data.
<-bb.readCh
}
}
// Reset resets the buffer.
func (bb *blockingBuffer) Reset() {
bb.mu.Lock()
defer bb.mu.Unlock()
bb.buf.Reset()
// Reset the readCh.
select {
case <-bb.readCh:
default:
}
}
| TestJobControlSignalExec | identifier_name |
build.py | #!/usr/bin/python
# Copyright 2010 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import action_tree
import cmd_env
script_dir = os.path.abspath(os.path.dirname(__file__))
# This allows "src" to be a symlink pointing to NaCl's "trunk/src".
nacl_src = os.path.join(script_dir, "src")
# Otherwise we expect to live inside the NaCl tree.
if not os.path.exists(nacl_src):
nacl_src = os.path.normpath(os.path.join(script_dir, "..", "..", ".."))
nacl_dir = os.path.join(nacl_src, "native_client")
subdirs = [
"third_party/gcc",
"third_party/binutils",
"third_party/newlib",
"native_client/tools/patches"]
search_path = [os.path.join(nacl_src, subdir) for subdir in subdirs]
def find_file(name):
for dir_path in search_path:
filename = os.path.join(dir_path, name)
if os.path.exists(filename):
return filename
raise Exception("Couldn't find %r in %r" % (name, search_path))
def get_one(lst):
assert len(lst) == 1, lst
return lst[0]
def write_file(filename, data):
fh = open(filename, "w")
try:
fh.write(data)
finally:
fh.close()
def mkdir_p(dir_path):
subprocess.check_call(["mkdir", "-p", dir_path])
class DirTree(object):
# write_tree(dest_dir) makes a fresh copy of the tree in dest_dir.
# It can assume that dest_dir is initially empty.
# The state of dest_dir is undefined if write_tree() fails.
def write_tree(self, env, dest_dir):
raise NotImplementedError()
class EmptyTree(DirTree):
def write_tree(self, env, dest_dir):
pass
class TarballTree(DirTree):
def __init__(self, tar_path):
self._tar_path = tar_path
def write_tree(self, env, dest_dir):
# Tarballs normally contain a single top-level directory with
# a name like foo-module-1.2.3. We strip this off.
assert os.listdir(dest_dir) == []
env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
# This handles gcc, where two source tarballs must be unpacked on top
# of each other.
class MultiTarballTree(DirTree):
def __init__(self, tar_paths):
self._tar_paths = tar_paths
def write_tree(self, env, dest_dir):
assert os.listdir(dest_dir) == []
for tar_file in self._tar_paths:
env.cmd(["tar", "-C", dest_dir, "-xf", tar_file])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
class PatchedTree(DirTree):
def __init__(self, orig_tree, patch_file):
self._orig_tree = orig_tree
self._patch_file = patch_file
def write_tree(self, env, dest_dir):
self._orig_tree.write_tree(env, dest_dir)
env.cmd(["patch", "-d", dest_dir, "-p1", "-i", self._patch_file])
class EnvVarEnv(object):
def __init__(self, envvars, env):
self._envvars = envvars
self._env = env
def cmd(self, args, **kwargs):
return self._env.cmd(
["env"] + ["%s=%s" % (key, value) for key, value in self._envvars]
+ args, **kwargs)
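# ModuleBase holds the source/build/install locations and the wrapped command
# environments shared by each module's unpack/configure/make/install steps.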
class ModuleBase(object):
def __init__(self, source_dir, build_dir, prefix, install_dir, env_vars):
self._env = cmd_env.VerboseWrapper(cmd_env.BasicEnv())
self._source_dir = source_dir
self._build_dir = build_dir
self._prefix = prefix
self._install_dir = install_dir
self._build_env = cmd_env.PrefixCmdEnv(
cmd_env.in_dir(self._build_dir), EnvVarEnv(env_vars, self._env))
self._args = {"prefix": self._prefix,
"source_dir": self._source_dir}
def all(self):
return action_tree.make_node(
[self.unpack, self.configure, self.make, self.install], self.name)
def unpack(self, log):
if not os.path.exists(self._source_dir):
|
def remove_tree(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def copy_onto(source_dir, dest_dir):
for leafname in os.listdir(source_dir):
subprocess.check_call(["cp", "-a", os.path.join(source_dir, leafname),
"-t", dest_dir])
def install_destdir(prefix_dir, install_dir, func):
temp_dir = "%s.tmp" % install_dir
remove_tree(temp_dir)
func(temp_dir)
remove_tree(install_dir)
# Tree is installed into $DESTDIR/$prefix.
# We need to strip $prefix.
assert prefix_dir.startswith("/")
os.rename(os.path.join(temp_dir, prefix_dir.lstrip("/")), install_dir)
# TODO: assert that temp_dir doesn't contain anything except prefix dirs
remove_tree(temp_dir)
mkdir_p(prefix_dir)
copy_onto(install_dir, prefix_dir)
binutils_tree = PatchedTree(TarballTree(find_file("binutils-2.20.tar.bz2")),
find_file("binutils-2.20.patch"))
# TODO: Need to glob for multiple patch files
gcc_tree = PatchedTree(MultiTarballTree(
[find_file("gcc-core-4.2.2.tar.bz2"),
find_file("gcc-g++-4.2.2.tar.bz2")]),
find_file("000-gcc-4.2.2.patch"))
newlib_tree = PatchedTree(TarballTree(find_file("newlib-1.17.0.tar.gz")),
find_file("newlib-1.17.0.patch"))
def Module(name, source, configure_cmd, make_cmd, install_cmd):
# TODO: this nested class is ugly
class Mod(ModuleBase):
# These assignments don't work because of Python's odd scoping rules:
# name = name
# source = source
def _subst(self, cmd):
return [arg % self._args for arg in cmd]
def configure(self, log):
mkdir_p(self._build_dir)
self._build_env.cmd(self._subst(configure_cmd))
def make(self, log):
self._build_env.cmd(self._subst(make_cmd))
def install(self, log):
def run(dest):
cmd = [arg % {"destdir": dest} for arg in install_cmd]
self._build_env.cmd(cmd)
install_destdir(self._prefix, self._install_dir, run)
Mod.name = name
Mod.source = source
return Mod
ModuleBinutils = Module(
name="binutils",
source=binutils_tree,
configure_cmd=[
"sh", "-c",
"%(source_dir)s/configure "
'CFLAGS="-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"--prefix=%(prefix)s "
"--target=nacl"],
make_cmd=["make", "-j4"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
common_gcc_options = (
"--with-as=`which nacl-as` " # Experimental
"--disable-libmudflap "
"--disable-decimal-float "
"--disable-libssp "
"--disable-libstdcxx-pch "
"--disable-shared "
"--prefix=%(prefix)s "
"--target=nacl ")
ModulePregcc = Module(
name="pregcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -D__gthr_posix_h '
'-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--without-headers "
"--enable-languages=c "
"--disable-threads " # pregcc
+ common_gcc_options],
# The default make target doesn't work - it gives libiberty
# configure failures. Need to do "all-gcc" instead.
make_cmd=["make", "all-gcc", "-j2"],
install_cmd=["make", "install-gcc", "DESTDIR=%(destdir)s"])
ModuleFullgcc = Module(
name="fullgcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--with-newlib "
"--enable-threads=nacl "
"--enable-tls "
"--disable-libgomp "
'--enable-languages="c,c++" '
+ common_gcc_options],
make_cmd=["make", "all", "-j2"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
class ModuleNewlib(ModuleBase):
name = "newlib"
source = newlib_tree
def configure(self, log):
# This is like exporting the kernel headers to glibc.
# This should be done differently.
self._env.cmd(
[os.path.join(nacl_dir,
"src/trusted/service_runtime/export_header.py"),
os.path.join(nacl_dir, "src/trusted/service_runtime/include"),
os.path.join(self._source_dir, "newlib/libc/sys/nacl")])
mkdir_p(self._build_dir)
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
self._build_env.cmd([
"sh", "-c",
'CFLAGS="-m32 -march=i486 -msse2 -mfpmath=sse" '
"%(source_dir)s/configure "
"--enable-newlib-io-long-long "
"--enable-newlib-io-c99-formats "
"--prefix=%(prefix)s "
"--target=nacl"
% self._args])
def make(self, log):
self._build_env.cmd(["sh", "-c", "make"])
def install(self, log):
install_destdir(
self._prefix, self._install_dir,
lambda dest: self._build_env.cmd(["make", "install",
"DESTDIR=%s" % dest]))
class ModuleNcthreads(ModuleBase):
name = "nc_threads"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "install_libpthread",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnaclHeaders(ModuleBase):
name = "libnacl_headers"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update_header",
"USE_PATH=1",
"nocpp=yes",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnacl(ModuleBase):
# Covers libnacl.a, crt[1ni].o and misc libraries built with Scons.
name = "libnacl"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class TestModule(ModuleBase):
name = "test"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
mkdir_p(self._build_dir)
write_file(os.path.join(self._build_dir, "hellow.c"), """
#include <stdio.h>
int main() {
printf("Hello world\\n");
return 0;
}
""")
self._build_env.cmd(["sh", "-c", "nacl-gcc hellow.c -o hellow"])
def install(self, log):
pass
def add_to_path(path, dir_path):
return "%s:%s" % (dir_path, path)
mods = [
ModuleBinutils,
ModulePregcc,
ModuleNewlib,
ModuleNcthreads,
ModuleFullgcc,
ModuleLibnaclHeaders,
ModuleLibnacl,
TestModule,
]
def all_mods(top_dir, use_shared_prefix):
nodes = []
env_vars = []
path_dirs = []
source_base = os.path.join(top_dir, "source")
if use_shared_prefix:
base_dir = os.path.join(top_dir, "shared")
prefix = os.path.join(base_dir, "prefix")
path_dirs.append(os.path.join(prefix, "bin"))
else:
base_dir = os.path.join(top_dir, "split")
prefix_base = os.path.join(base_dir, "prefixes")
for mod in mods:
if not use_shared_prefix:
# TODO: In split-prefix case, we don't really need "prefix" dirs.
# Just use the "install" dirs.
prefix = os.path.join(prefix_base, mod.name)
path_dirs.append(os.path.join(prefix, "bin"))
source_dir = os.path.join(source_base, mod.name)
build_dir = os.path.join(base_dir, "build", mod.name)
install_dir = os.path.join(base_dir, "install", mod.name)
builder = mod(source_dir, build_dir, prefix, install_dir, env_vars)
nodes.append(builder.all())
env_vars.append(("PATH",
reduce(add_to_path, path_dirs, os.environ["PATH"])))
return action_tree.make_node(nodes, name="all")
def main(args):
base_dir = os.getcwd()
top = all_mods(base_dir, use_shared_prefix=True)
action_tree.action_main(top, args)
if __name__ == "__main__":
main(sys.argv[1:])
| temp_dir = "%s.temp" % self._source_dir
os.makedirs(temp_dir)
self.source.write_tree(self._env, temp_dir)
os.rename(temp_dir, self._source_dir) | conditional_block |
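Aside: the block completed above is the body of ModuleBase.unpack(), which stages the tree into a sibling "<source_dir>.temp" directory and renames it into place only after writing succeeds. A minimal standalone sketch of that stage-then-rename idiom, with illustrative names that are not part of build.py:

import os

def unpack_atomically(source_dir, write_tree):
    # Skip the work if a previous run already produced the tree.
    if not os.path.exists(source_dir):
        temp_dir = "%s.temp" % source_dir
        os.makedirs(temp_dir)
        # write_tree() may fail part-way; source_dir then never appears,
        # so a later run starts from scratch instead of a half-written tree.
        write_tree(temp_dir)
        os.rename(temp_dir, source_dir)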
build.py | #!/usr/bin/python
# Copyright 2010 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import action_tree
import cmd_env
script_dir = os.path.abspath(os.path.dirname(__file__))
# This allows "src" to be a symlink pointing to NaCl's "trunk/src".
nacl_src = os.path.join(script_dir, "src")
# Otherwise we expect to live inside the NaCl tree.
if not os.path.exists(nacl_src):
nacl_src = os.path.normpath(os.path.join(script_dir, "..", "..", ".."))
nacl_dir = os.path.join(nacl_src, "native_client")
subdirs = [
"third_party/gcc",
"third_party/binutils",
"third_party/newlib",
"native_client/tools/patches"]
search_path = [os.path.join(nacl_src, subdir) for subdir in subdirs]
def find_file(name):
for dir_path in search_path:
filename = os.path.join(dir_path, name)
if os.path.exists(filename):
return filename
raise Exception("Couldn't find %r in %r" % (name, search_path))
def get_one(lst):
assert len(lst) == 1, lst
return lst[0]
def write_file(filename, data):
fh = open(filename, "w")
try:
fh.write(data)
finally:
fh.close()
def mkdir_p(dir_path):
subprocess.check_call(["mkdir", "-p", dir_path])
class DirTree(object):
# write_tree(dest_dir) makes a fresh copy of the tree in dest_dir.
# It can assume that dest_dir is initially empty.
# The state of dest_dir is undefined if write_tree() fails.
def write_tree(self, env, dest_dir):
raise NotImplementedError()
class EmptyTree(DirTree):
def write_tree(self, env, dest_dir):
pass
class TarballTree(DirTree):
def __init__(self, tar_path):
self._tar_path = tar_path
def write_tree(self, env, dest_dir):
# Tarballs normally contain a single top-level directory with
# a name like foo-module-1.2.3. We strip this off.
assert os.listdir(dest_dir) == []
env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
# This handles gcc, where two source tarballs must be unpacked on top
# of each other.
class MultiTarballTree(DirTree):
def __init__(self, tar_paths):
self._tar_paths = tar_paths
def write_tree(self, env, dest_dir):
assert os.listdir(dest_dir) == []
for tar_file in self._tar_paths:
env.cmd(["tar", "-C", dest_dir, "-xf", tar_file])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
class PatchedTree(DirTree):
def __init__(self, orig_tree, patch_file):
self._orig_tree = orig_tree
self._patch_file = patch_file
def write_tree(self, env, dest_dir):
self._orig_tree.write_tree(env, dest_dir)
env.cmd(["patch", "-d", dest_dir, "-p1", "-i", self._patch_file])
class EnvVarEnv(object):
def __init__(self, envvars, env):
self._envvars = envvars
self._env = env
def cmd(self, args, **kwargs):
return self._env.cmd(
["env"] + ["%s=%s" % (key, value) for key, value in self._envvars]
+ args, **kwargs)
class ModuleBase(object):
def __init__(self, source_dir, build_dir, prefix, install_dir, env_vars):
self._env = cmd_env.VerboseWrapper(cmd_env.BasicEnv())
self._source_dir = source_dir
self._build_dir = build_dir
self._prefix = prefix
self._install_dir = install_dir
self._build_env = cmd_env.PrefixCmdEnv(
cmd_env.in_dir(self._build_dir), EnvVarEnv(env_vars, self._env))
self._args = {"prefix": self._prefix,
"source_dir": self._source_dir}
def all(self):
return action_tree.make_node(
[self.unpack, self.configure, self.make, self.install], self.name)
def unpack(self, log):
if not os.path.exists(self._source_dir):
temp_dir = "%s.temp" % self._source_dir
os.makedirs(temp_dir)
self.source.write_tree(self._env, temp_dir)
os.rename(temp_dir, self._source_dir)
def remove_tree(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def copy_onto(source_dir, dest_dir):
for leafname in os.listdir(source_dir):
subprocess.check_call(["cp", "-a", os.path.join(source_dir, leafname),
"-t", dest_dir])
def install_destdir(prefix_dir, install_dir, func):
temp_dir = "%s.tmp" % install_dir
remove_tree(temp_dir)
func(temp_dir)
remove_tree(install_dir)
# Tree is installed into $DESTDIR/$prefix.
# We need to strip $prefix.
assert prefix_dir.startswith("/")
os.rename(os.path.join(temp_dir, prefix_dir.lstrip("/")), install_dir)
# TODO: assert that temp_dir doesn't contain anything except prefix dirs
remove_tree(temp_dir)
mkdir_p(prefix_dir)
copy_onto(install_dir, prefix_dir)
binutils_tree = PatchedTree(TarballTree(find_file("binutils-2.20.tar.bz2")),
find_file("binutils-2.20.patch"))
# TODO: Need to glob for multiple patch files
gcc_tree = PatchedTree(MultiTarballTree(
[find_file("gcc-core-4.2.2.tar.bz2"),
find_file("gcc-g++-4.2.2.tar.bz2")]),
find_file("000-gcc-4.2.2.patch"))
newlib_tree = PatchedTree(TarballTree(find_file("newlib-1.17.0.tar.gz")),
find_file("newlib-1.17.0.patch"))
def Module(name, source, configure_cmd, make_cmd, install_cmd):
# TODO: this nested class is ugly
class Mod(ModuleBase):
# These assignments don't work because of Python's odd scoping rules:
# name = name
# source = source
def _subst(self, cmd):
return [arg % self._args for arg in cmd]
def configure(self, log):
mkdir_p(self._build_dir)
self._build_env.cmd(self._subst(configure_cmd))
def make(self, log):
self._build_env.cmd(self._subst(make_cmd))
def | (self, log):
def run(dest):
cmd = [arg % {"destdir": dest} for arg in install_cmd]
self._build_env.cmd(cmd)
install_destdir(self._prefix, self._install_dir, run)
Mod.name = name
Mod.source = source
return Mod
ModuleBinutils = Module(
name="binutils",
source=binutils_tree,
configure_cmd=[
"sh", "-c",
"%(source_dir)s/configure "
'CFLAGS="-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"--prefix=%(prefix)s "
"--target=nacl"],
make_cmd=["make", "-j4"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
common_gcc_options = (
"--with-as=`which nacl-as` " # Experimental
"--disable-libmudflap "
"--disable-decimal-float "
"--disable-libssp "
"--disable-libstdcxx-pch "
"--disable-shared "
"--prefix=%(prefix)s "
"--target=nacl ")
ModulePregcc = Module(
name="pregcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -D__gthr_posix_h '
'-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--without-headers "
"--enable-languages=c "
"--disable-threads " # pregcc
+ common_gcc_options],
# The default make target doesn't work - it gives libiberty
# configure failures. Need to do "all-gcc" instead.
make_cmd=["make", "all-gcc", "-j2"],
install_cmd=["make", "install-gcc", "DESTDIR=%(destdir)s"])
ModuleFullgcc = Module(
name="fullgcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--with-newlib "
"--enable-threads=nacl "
"--enable-tls "
"--disable-libgomp "
'--enable-languages="c,c++" '
+ common_gcc_options],
make_cmd=["make", "all", "-j2"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
class ModuleNewlib(ModuleBase):
name = "newlib"
source = newlib_tree
def configure(self, log):
# This is like exporting the kernel headers to glibc.
# This should be done differently.
self._env.cmd(
[os.path.join(nacl_dir,
"src/trusted/service_runtime/export_header.py"),
os.path.join(nacl_dir, "src/trusted/service_runtime/include"),
os.path.join(self._source_dir, "newlib/libc/sys/nacl")])
mkdir_p(self._build_dir)
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
self._build_env.cmd([
"sh", "-c",
'CFLAGS="-m32 -march=i486 -msse2 -mfpmath=sse" '
"%(source_dir)s/configure "
"--enable-newlib-io-long-long "
"--enable-newlib-io-c99-formats "
"--prefix=%(prefix)s "
"--target=nacl"
% self._args])
def make(self, log):
self._build_env.cmd(["sh", "-c", "make"])
def install(self, log):
install_destdir(
self._prefix, self._install_dir,
lambda dest: self._build_env.cmd(["make", "install",
"DESTDIR=%s" % dest]))
class ModuleNcthreads(ModuleBase):
name = "nc_threads"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "install_libpthread",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnaclHeaders(ModuleBase):
name = "libnacl_headers"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update_header",
"USE_PATH=1",
"nocpp=yes",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnacl(ModuleBase):
# Covers libnacl.a, crt[1ni].o and misc libraries built with Scons.
name = "libnacl"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class TestModule(ModuleBase):
name = "test"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
mkdir_p(self._build_dir)
write_file(os.path.join(self._build_dir, "hellow.c"), """
#include <stdio.h>
int main() {
printf("Hello world\\n");
return 0;
}
""")
self._build_env.cmd(["sh", "-c", "nacl-gcc hellow.c -o hellow"])
def install(self, log):
pass
def add_to_path(path, dir_path):
return "%s:%s" % (dir_path, path)
mods = [
ModuleBinutils,
ModulePregcc,
ModuleNewlib,
ModuleNcthreads,
ModuleFullgcc,
ModuleLibnaclHeaders,
ModuleLibnacl,
TestModule,
]
def all_mods(top_dir, use_shared_prefix):
nodes = []
env_vars = []
path_dirs = []
source_base = os.path.join(top_dir, "source")
if use_shared_prefix:
base_dir = os.path.join(top_dir, "shared")
prefix = os.path.join(base_dir, "prefix")
path_dirs.append(os.path.join(prefix, "bin"))
else:
base_dir = os.path.join(top_dir, "split")
prefix_base = os.path.join(base_dir, "prefixes")
for mod in mods:
if not use_shared_prefix:
# TODO: In split-prefix case, we don't really need "prefix" dirs.
# Just use the "install" dirs.
prefix = os.path.join(prefix_base, mod.name)
path_dirs.append(os.path.join(prefix, "bin"))
source_dir = os.path.join(source_base, mod.name)
build_dir = os.path.join(base_dir, "build", mod.name)
install_dir = os.path.join(base_dir, "install", mod.name)
builder = mod(source_dir, build_dir, prefix, install_dir, env_vars)
nodes.append(builder.all())
env_vars.append(("PATH",
reduce(add_to_path, path_dirs, os.environ["PATH"])))
return action_tree.make_node(nodes, name="all")
def main(args):
base_dir = os.getcwd()
top = all_mods(base_dir, use_shared_prefix=True)
action_tree.action_main(top, args)
if __name__ == "__main__":
main(sys.argv[1:])
| install | identifier_name |
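Aside: the identifier completed above is the install() method of the generated Mod class, which delegates to install_destdir(). A rough sketch of that DESTDIR staging flow using plain subprocess calls; the function name and directory layout are invented for illustration:

import os
import shutil
import subprocess

def staged_install(build_dir, prefix, install_dir):
    temp_dir = "%s.tmp" % install_dir
    shutil.rmtree(temp_dir, ignore_errors=True)
    # "make install" writes the tree under <temp_dir>/<prefix>/...
    subprocess.check_call(["make", "install", "DESTDIR=" + temp_dir],
                          cwd=build_dir)
    shutil.rmtree(install_dir, ignore_errors=True)
    # Strip the prefix component so install_dir holds the tree directly.
    assert prefix.startswith("/")
    os.rename(os.path.join(temp_dir, prefix.lstrip("/")), install_dir)
    shutil.rmtree(temp_dir, ignore_errors=True)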
build.py | #!/usr/bin/python
# Copyright 2010 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import action_tree
import cmd_env
script_dir = os.path.abspath(os.path.dirname(__file__))
# This allows "src" to be a symlink pointing to NaCl's "trunk/src".
nacl_src = os.path.join(script_dir, "src")
# Otherwise we expect to live inside the NaCl tree.
if not os.path.exists(nacl_src):
nacl_src = os.path.normpath(os.path.join(script_dir, "..", "..", ".."))
nacl_dir = os.path.join(nacl_src, "native_client")
subdirs = [
"third_party/gcc",
"third_party/binutils",
"third_party/newlib",
"native_client/tools/patches"]
search_path = [os.path.join(nacl_src, subdir) for subdir in subdirs]
def find_file(name):
for dir_path in search_path:
filename = os.path.join(dir_path, name)
if os.path.exists(filename):
return filename
raise Exception("Couldn't find %r in %r" % (name, search_path))
def get_one(lst):
assert len(lst) == 1, lst
return lst[0]
def write_file(filename, data):
fh = open(filename, "w")
try:
fh.write(data)
finally:
fh.close()
def mkdir_p(dir_path):
subprocess.check_call(["mkdir", "-p", dir_path])
class DirTree(object):
# write_tree(dest_dir) makes a fresh copy of the tree in dest_dir.
# It can assume that dest_dir is initially empty.
# The state of dest_dir is undefined if write_tree() fails.
def write_tree(self, env, dest_dir):
raise NotImplementedError()
class EmptyTree(DirTree):
def write_tree(self, env, dest_dir):
pass
class TarballTree(DirTree):
def __init__(self, tar_path):
self._tar_path = tar_path
def write_tree(self, env, dest_dir):
# Tarballs normally contain a single top-level directory with
# a name like foo-module-1.2.3. We strip this off.
assert os.listdir(dest_dir) == []
env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
# This handles gcc, where two source tarballs must be unpacked on top
# of each other.
class MultiTarballTree(DirTree):
def __init__(self, tar_paths):
self._tar_paths = tar_paths
def write_tree(self, env, dest_dir):
assert os.listdir(dest_dir) == []
for tar_file in self._tar_paths:
env.cmd(["tar", "-C", dest_dir, "-xf", tar_file])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
class PatchedTree(DirTree):
def __init__(self, orig_tree, patch_file):
self._orig_tree = orig_tree
self._patch_file = patch_file
def write_tree(self, env, dest_dir):
self._orig_tree.write_tree(env, dest_dir)
env.cmd(["patch", "-d", dest_dir, "-p1", "-i", self._patch_file])
class EnvVarEnv(object):
def __init__(self, envvars, env):
self._envvars = envvars
self._env = env
def cmd(self, args, **kwargs):
return self._env.cmd(
["env"] + ["%s=%s" % (key, value) for key, value in self._envvars]
+ args, **kwargs)
class ModuleBase(object):
def __init__(self, source_dir, build_dir, prefix, install_dir, env_vars):
self._env = cmd_env.VerboseWrapper(cmd_env.BasicEnv())
self._source_dir = source_dir
self._build_dir = build_dir
self._prefix = prefix
self._install_dir = install_dir
self._build_env = cmd_env.PrefixCmdEnv(
cmd_env.in_dir(self._build_dir), EnvVarEnv(env_vars, self._env))
self._args = {"prefix": self._prefix,
"source_dir": self._source_dir}
def all(self):
return action_tree.make_node(
[self.unpack, self.configure, self.make, self.install], self.name)
def unpack(self, log):
if not os.path.exists(self._source_dir):
temp_dir = "%s.temp" % self._source_dir
os.makedirs(temp_dir)
self.source.write_tree(self._env, temp_dir)
os.rename(temp_dir, self._source_dir)
def remove_tree(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def copy_onto(source_dir, dest_dir):
for leafname in os.listdir(source_dir):
subprocess.check_call(["cp", "-a", os.path.join(source_dir, leafname), | def install_destdir(prefix_dir, install_dir, func):
temp_dir = "%s.tmp" % install_dir
remove_tree(temp_dir)
func(temp_dir)
remove_tree(install_dir)
# Tree is installed into $DESTDIR/$prefix.
# We need to strip $prefix.
assert prefix_dir.startswith("/")
os.rename(os.path.join(temp_dir, prefix_dir.lstrip("/")), install_dir)
# TODO: assert that temp_dir doesn't contain anything except prefix dirs
remove_tree(temp_dir)
mkdir_p(prefix_dir)
copy_onto(install_dir, prefix_dir)
binutils_tree = PatchedTree(TarballTree(find_file("binutils-2.20.tar.bz2")),
find_file("binutils-2.20.patch"))
# TODO: Need to glob for multiple patch files
gcc_tree = PatchedTree(MultiTarballTree(
[find_file("gcc-core-4.2.2.tar.bz2"),
find_file("gcc-g++-4.2.2.tar.bz2")]),
find_file("000-gcc-4.2.2.patch"))
newlib_tree = PatchedTree(TarballTree(find_file("newlib-1.17.0.tar.gz")),
find_file("newlib-1.17.0.patch"))
def Module(name, source, configure_cmd, make_cmd, install_cmd):
# TODO: this nested class is ugly
class Mod(ModuleBase):
# These assignments don't work because of Python's odd scoping rules:
# name = name
# source = source
def _subst(self, cmd):
return [arg % self._args for arg in cmd]
def configure(self, log):
mkdir_p(self._build_dir)
self._build_env.cmd(self._subst(configure_cmd))
def make(self, log):
self._build_env.cmd(self._subst(make_cmd))
def install(self, log):
def run(dest):
cmd = [arg % {"destdir": dest} for arg in install_cmd]
self._build_env.cmd(cmd)
install_destdir(self._prefix, self._install_dir, run)
Mod.name = name
Mod.source = source
return Mod
ModuleBinutils = Module(
name="binutils",
source=binutils_tree,
configure_cmd=[
"sh", "-c",
"%(source_dir)s/configure "
'CFLAGS="-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"--prefix=%(prefix)s "
"--target=nacl"],
make_cmd=["make", "-j4"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
common_gcc_options = (
"--with-as=`which nacl-as` " # Experimental
"--disable-libmudflap "
"--disable-decimal-float "
"--disable-libssp "
"--disable-libstdcxx-pch "
"--disable-shared "
"--prefix=%(prefix)s "
"--target=nacl ")
ModulePregcc = Module(
name="pregcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -D__gthr_posix_h '
'-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--without-headers "
"--enable-languages=c "
"--disable-threads " # pregcc
+ common_gcc_options],
# The default make target doesn't work - it gives libiberty
# configure failures. Need to do "all-gcc" instead.
make_cmd=["make", "all-gcc", "-j2"],
install_cmd=["make", "install-gcc", "DESTDIR=%(destdir)s"])
ModuleFullgcc = Module(
name="fullgcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--with-newlib "
"--enable-threads=nacl "
"--enable-tls "
"--disable-libgomp "
'--enable-languages="c,c++" '
+ common_gcc_options],
make_cmd=["make", "all", "-j2"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
class ModuleNewlib(ModuleBase):
name = "newlib"
source = newlib_tree
def configure(self, log):
# This is like exporting the kernel headers to glibc.
# This should be done differently.
self._env.cmd(
[os.path.join(nacl_dir,
"src/trusted/service_runtime/export_header.py"),
os.path.join(nacl_dir, "src/trusted/service_runtime/include"),
os.path.join(self._source_dir, "newlib/libc/sys/nacl")])
mkdir_p(self._build_dir)
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
self._build_env.cmd([
"sh", "-c",
'CFLAGS="-m32 -march=i486 -msse2 -mfpmath=sse" '
"%(source_dir)s/configure "
"--enable-newlib-io-long-long "
"--enable-newlib-io-c99-formats "
"--prefix=%(prefix)s "
"--target=nacl"
% self._args])
def make(self, log):
self._build_env.cmd(["sh", "-c", "make"])
def install(self, log):
install_destdir(
self._prefix, self._install_dir,
lambda dest: self._build_env.cmd(["make", "install",
"DESTDIR=%s" % dest]))
class ModuleNcthreads(ModuleBase):
name = "nc_threads"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "install_libpthread",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnaclHeaders(ModuleBase):
name = "libnacl_headers"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update_header",
"USE_PATH=1",
"nocpp=yes",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnacl(ModuleBase):
# Covers libnacl.a, crt[1ni].o and misc libraries built with Scons.
name = "libnacl"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class TestModule(ModuleBase):
name = "test"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
mkdir_p(self._build_dir)
write_file(os.path.join(self._build_dir, "hellow.c"), """
#include <stdio.h>
int main() {
printf("Hello world\\n");
return 0;
}
""")
self._build_env.cmd(["sh", "-c", "nacl-gcc hellow.c -o hellow"])
def install(self, log):
pass
def add_to_path(path, dir_path):
return "%s:%s" % (dir_path, path)
mods = [
ModuleBinutils,
ModulePregcc,
ModuleNewlib,
ModuleNcthreads,
ModuleFullgcc,
ModuleLibnaclHeaders,
ModuleLibnacl,
TestModule,
]
def all_mods(top_dir, use_shared_prefix):
nodes = []
env_vars = []
path_dirs = []
source_base = os.path.join(top_dir, "source")
if use_shared_prefix:
base_dir = os.path.join(top_dir, "shared")
prefix = os.path.join(base_dir, "prefix")
path_dirs.append(os.path.join(prefix, "bin"))
else:
base_dir = os.path.join(top_dir, "split")
prefix_base = os.path.join(base_dir, "prefixes")
for mod in mods:
if not use_shared_prefix:
# TODO: In split-prefix case, we don't really need "prefix" dirs.
# Just use the "install" dirs.
prefix = os.path.join(prefix_base, mod.name)
path_dirs.append(os.path.join(prefix, "bin"))
source_dir = os.path.join(source_base, mod.name)
build_dir = os.path.join(base_dir, "build", mod.name)
install_dir = os.path.join(base_dir, "install", mod.name)
builder = mod(source_dir, build_dir, prefix, install_dir, env_vars)
nodes.append(builder.all())
env_vars.append(("PATH",
reduce(add_to_path, path_dirs, os.environ["PATH"])))
return action_tree.make_node(nodes, name="all")
def main(args):
base_dir = os.getcwd()
top = all_mods(base_dir, use_shared_prefix=True)
action_tree.action_main(top, args)
if __name__ == "__main__":
main(sys.argv[1:]) | "-t", dest_dir])
| random_line_split |
build.py | #!/usr/bin/python
# Copyright 2010 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import action_tree
import cmd_env
script_dir = os.path.abspath(os.path.dirname(__file__))
# This allows "src" to be a symlink pointing to NaCl's "trunk/src".
nacl_src = os.path.join(script_dir, "src")
# Otherwise we expect to live inside the NaCl tree.
if not os.path.exists(nacl_src):
nacl_src = os.path.normpath(os.path.join(script_dir, "..", "..", ".."))
nacl_dir = os.path.join(nacl_src, "native_client")
subdirs = [
"third_party/gcc",
"third_party/binutils",
"third_party/newlib",
"native_client/tools/patches"]
search_path = [os.path.join(nacl_src, subdir) for subdir in subdirs]
def find_file(name):
for dir_path in search_path:
filename = os.path.join(dir_path, name)
if os.path.exists(filename):
return filename
raise Exception("Couldn't find %r in %r" % (name, search_path))
def get_one(lst):
assert len(lst) == 1, lst
return lst[0]
def write_file(filename, data):
fh = open(filename, "w")
try:
fh.write(data)
finally:
fh.close()
def mkdir_p(dir_path):
subprocess.check_call(["mkdir", "-p", dir_path])
class DirTree(object):
# write_tree(dest_dir) makes a fresh copy of the tree in dest_dir.
# It can assume that dest_dir is initially empty.
# The state of dest_dir is undefined if write_tree() fails.
def write_tree(self, env, dest_dir):
raise NotImplementedError()
class EmptyTree(DirTree):
def write_tree(self, env, dest_dir):
pass
class TarballTree(DirTree):
def __init__(self, tar_path):
self._tar_path = tar_path
def write_tree(self, env, dest_dir):
# Tarballs normally contain a single top-level directory with
# a name like foo-module-1.2.3. We strip this off.
assert os.listdir(dest_dir) == []
env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
# This handles gcc, where two source tarballs must be unpacked on top
# of each other.
class MultiTarballTree(DirTree):
def __init__(self, tar_paths):
self._tar_paths = tar_paths
def write_tree(self, env, dest_dir):
assert os.listdir(dest_dir) == []
for tar_file in self._tar_paths:
env.cmd(["tar", "-C", dest_dir, "-xf", tar_file])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
class PatchedTree(DirTree):
def __init__(self, orig_tree, patch_file):
self._orig_tree = orig_tree
self._patch_file = patch_file
def write_tree(self, env, dest_dir):
self._orig_tree.write_tree(env, dest_dir)
env.cmd(["patch", "-d", dest_dir, "-p1", "-i", self._patch_file])
class EnvVarEnv(object):
def __init__(self, envvars, env):
self._envvars = envvars
self._env = env
def cmd(self, args, **kwargs):
return self._env.cmd(
["env"] + ["%s=%s" % (key, value) for key, value in self._envvars]
+ args, **kwargs)
class ModuleBase(object):
def __init__(self, source_dir, build_dir, prefix, install_dir, env_vars):
self._env = cmd_env.VerboseWrapper(cmd_env.BasicEnv())
self._source_dir = source_dir
self._build_dir = build_dir
self._prefix = prefix
self._install_dir = install_dir
self._build_env = cmd_env.PrefixCmdEnv(
cmd_env.in_dir(self._build_dir), EnvVarEnv(env_vars, self._env))
self._args = {"prefix": self._prefix,
"source_dir": self._source_dir}
def all(self):
return action_tree.make_node(
[self.unpack, self.configure, self.make, self.install], self.name)
def unpack(self, log):
if not os.path.exists(self._source_dir):
temp_dir = "%s.temp" % self._source_dir
os.makedirs(temp_dir)
self.source.write_tree(self._env, temp_dir)
os.rename(temp_dir, self._source_dir)
def remove_tree(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def copy_onto(source_dir, dest_dir):
for leafname in os.listdir(source_dir):
subprocess.check_call(["cp", "-a", os.path.join(source_dir, leafname),
"-t", dest_dir])
def install_destdir(prefix_dir, install_dir, func):
temp_dir = "%s.tmp" % install_dir
remove_tree(temp_dir)
func(temp_dir)
remove_tree(install_dir)
# Tree is installed into $DESTDIR/$prefix.
# We need to strip $prefix.
assert prefix_dir.startswith("/")
os.rename(os.path.join(temp_dir, prefix_dir.lstrip("/")), install_dir)
# TODO: assert that temp_dir doesn't contain anything except prefix dirs
remove_tree(temp_dir)
mkdir_p(prefix_dir)
copy_onto(install_dir, prefix_dir)
binutils_tree = PatchedTree(TarballTree(find_file("binutils-2.20.tar.bz2")),
find_file("binutils-2.20.patch"))
# TODO: Need to glob for multiple patch files
gcc_tree = PatchedTree(MultiTarballTree(
[find_file("gcc-core-4.2.2.tar.bz2"),
find_file("gcc-g++-4.2.2.tar.bz2")]),
find_file("000-gcc-4.2.2.patch"))
newlib_tree = PatchedTree(TarballTree(find_file("newlib-1.17.0.tar.gz")),
find_file("newlib-1.17.0.patch"))
def Module(name, source, configure_cmd, make_cmd, install_cmd):
# TODO: this nested class is ugly
class Mod(ModuleBase):
# These assignments don't work because of Python's odd scoping rules:
# name = name
# source = source
def _subst(self, cmd):
return [arg % self._args for arg in cmd]
def configure(self, log):
mkdir_p(self._build_dir)
self._build_env.cmd(self._subst(configure_cmd))
def make(self, log):
self._build_env.cmd(self._subst(make_cmd))
def install(self, log):
def run(dest):
cmd = [arg % {"destdir": dest} for arg in install_cmd]
self._build_env.cmd(cmd)
install_destdir(self._prefix, self._install_dir, run)
Mod.name = name
Mod.source = source
return Mod
ModuleBinutils = Module(
name="binutils",
source=binutils_tree,
configure_cmd=[
"sh", "-c",
"%(source_dir)s/configure "
'CFLAGS="-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"--prefix=%(prefix)s "
"--target=nacl"],
make_cmd=["make", "-j4"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
common_gcc_options = (
"--with-as=`which nacl-as` " # Experimental
"--disable-libmudflap "
"--disable-decimal-float "
"--disable-libssp "
"--disable-libstdcxx-pch "
"--disable-shared "
"--prefix=%(prefix)s "
"--target=nacl ")
ModulePregcc = Module(
name="pregcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -D__gthr_posix_h '
'-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--without-headers "
"--enable-languages=c "
"--disable-threads " # pregcc
+ common_gcc_options],
# The default make target doesn't work - it gives libiberty
# configure failures. Need to do "all-gcc" instead.
make_cmd=["make", "all-gcc", "-j2"],
install_cmd=["make", "install-gcc", "DESTDIR=%(destdir)s"])
ModuleFullgcc = Module(
name="fullgcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--with-newlib "
"--enable-threads=nacl "
"--enable-tls "
"--disable-libgomp "
'--enable-languages="c,c++" '
+ common_gcc_options],
make_cmd=["make", "all", "-j2"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
class ModuleNewlib(ModuleBase):
name = "newlib"
source = newlib_tree
def configure(self, log):
# This is like exporting the kernel headers to glibc.
# This should be done differently.
self._env.cmd(
[os.path.join(nacl_dir,
"src/trusted/service_runtime/export_header.py"),
os.path.join(nacl_dir, "src/trusted/service_runtime/include"),
os.path.join(self._source_dir, "newlib/libc/sys/nacl")])
mkdir_p(self._build_dir)
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
self._build_env.cmd([
"sh", "-c",
'CFLAGS="-m32 -march=i486 -msse2 -mfpmath=sse" '
"%(source_dir)s/configure "
"--enable-newlib-io-long-long "
"--enable-newlib-io-c99-formats "
"--prefix=%(prefix)s "
"--target=nacl"
% self._args])
def make(self, log):
self._build_env.cmd(["sh", "-c", "make"])
def install(self, log):
install_destdir(
self._prefix, self._install_dir,
lambda dest: self._build_env.cmd(["make", "install",
"DESTDIR=%s" % dest]))
class ModuleNcthreads(ModuleBase):
name = "nc_threads"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "install_libpthread",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnaclHeaders(ModuleBase):
name = "libnacl_headers"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update_header",
"USE_PATH=1",
"nocpp=yes",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnacl(ModuleBase):
# Covers libnacl.a, crt[1ni].o and misc libraries built with Scons.
|
class TestModule(ModuleBase):
name = "test"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
mkdir_p(self._build_dir)
write_file(os.path.join(self._build_dir, "hellow.c"), """
#include <stdio.h>
int main() {
printf("Hello world\\n");
return 0;
}
""")
self._build_env.cmd(["sh", "-c", "nacl-gcc hellow.c -o hellow"])
def install(self, log):
pass
def add_to_path(path, dir_path):
return "%s:%s" % (dir_path, path)
mods = [
ModuleBinutils,
ModulePregcc,
ModuleNewlib,
ModuleNcthreads,
ModuleFullgcc,
ModuleLibnaclHeaders,
ModuleLibnacl,
TestModule,
]
def all_mods(top_dir, use_shared_prefix):
nodes = []
env_vars = []
path_dirs = []
source_base = os.path.join(top_dir, "source")
if use_shared_prefix:
base_dir = os.path.join(top_dir, "shared")
prefix = os.path.join(base_dir, "prefix")
path_dirs.append(os.path.join(prefix, "bin"))
else:
base_dir = os.path.join(top_dir, "split")
prefix_base = os.path.join(base_dir, "prefixes")
for mod in mods:
if not use_shared_prefix:
# TODO: In split-prefix case, we don't really need "prefix" dirs.
# Just use the "install" dirs.
prefix = os.path.join(prefix_base, mod.name)
path_dirs.append(os.path.join(prefix, "bin"))
source_dir = os.path.join(source_base, mod.name)
build_dir = os.path.join(base_dir, "build", mod.name)
install_dir = os.path.join(base_dir, "install", mod.name)
builder = mod(source_dir, build_dir, prefix, install_dir, env_vars)
nodes.append(builder.all())
env_vars.append(("PATH",
reduce(add_to_path, path_dirs, os.environ["PATH"])))
return action_tree.make_node(nodes, name="all")
def main(args):
base_dir = os.getcwd()
top = all_mods(base_dir, use_shared_prefix=True)
action_tree.action_main(top, args)
if __name__ == "__main__":
main(sys.argv[1:])
| name = "libnacl"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make) | identifier_body |
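Aside: the body completed above is the ModuleLibnacl class, whose install() step drives scons and, like the other scons-based modules, relies on tools from earlier modules being reachable through PATH. all_mods() assembles that PATH with reduce(add_to_path, path_dirs, os.environ["PATH"]); a small worked example with invented directories (reduce is a builtin on the Python 2 this script targets, imported here so the snippet also runs on Python 3):

from functools import reduce

def add_to_path(path, dir_path):
    return "%s:%s" % (dir_path, path)

path_dirs = ["/work/shared/prefix/bin", "/work/split/prefixes/pregcc/bin"]
base_path = "/usr/bin:/bin"
# Each directory is prepended, so later entries end up earliest in PATH.
print(reduce(add_to_path, path_dirs, base_path))
# -> /work/split/prefixes/pregcc/bin:/work/shared/prefix/bin:/usr/bin:/bin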
response.rs | use hyper::header::{self, CookiePair as Cookie, ContentType, Header, SetCookie};
use hyper::status::StatusCode as Status;
use hyper::Headers;
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use serde_json::value as json;
use serde_json::value::ToJson;
use std::any::Any;
use std::boxed::Box;
use std::borrow::Cow;
use std::{error, fmt, result};
use std::fs::File;
use std::io::{self, ErrorKind, Read, Write};
use std::path::Path;
/// Defines a handler error
#[derive(Debug)]
pub struct Error {
pub status: Status,
pub message: Option<Cow<'static, str>>
}
pub type Result = result::Result<Action, Error>;
impl Error {
fn new(status: Status, message: Option<Cow<'static, str>>) -> Error {
Error {
status: status,
message: message
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use std::error::Error;
self.description().fmt(f)
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match self.message {
None => "<no description available>",
Some(ref message) => &message
}
}
fn | (&self) -> Option<&error::Error> {
None
}
}
impl From<Status> for Error {
fn from(status: Status) -> Error {
Error::new(status, None)
}
}
impl From<(Status, &'static str)> for Error {
fn from(pair: (Status, &'static str)) -> Error {
Error::new(pair.0, Some(Cow::Borrowed(pair.1)))
}
}
impl From<(Status, String)> for Error {
fn from(pair: (Status, String)) -> Error {
Error::new(pair.0, Some(Cow::Owned(pair.1)))
}
}
/// Defines the action to be taken when returning from a handler
pub enum Action {
/// Ends the response with no body and the given status (if given).
///
/// If the status is not given, the status currently set on the response is used.
/// By default, a response has a status 200 OK.
End(Option<Status>),
/// Redirects to the given URL with a 3xx status (use 302 Found if unsure).
Redirect(Status, String),
/// Renders the template with the given name using the given JSON value.
///
/// If no Content-Type header is set, the content type is set to `text/html`.
Render(String, json::Value),
/// Sends the response with the given bytes as the body.
Send(Vec<u8>),
/// Carries a closure that is called with the application object and a writer for the response body.
Stream(Box<Fn(&mut Any, &mut Write)>),
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
/// If the file does not exist, a 404 Not Found response is sent.
SendFile(String)
}
/// Conversion from `()` into `End(None)`.
impl From<()> for Action {
fn from(_: ()) -> Action {
Action::End(None)
}
}
/// Conversion from `Status` into `End(Some(status))`.
impl From<Status> for Action {
fn from(status: Status) -> Action {
Action::End(Some(status))
}
}
/// Conversion from `(Status, &str)` into `Action::Redirect(status, url)`.
impl<'a> From<(Status, &'a str)> for Action {
fn from(pair: (Status, &'a str)) -> Action {
Action::Redirect(pair.0, pair.1.to_string())
}
}
/// Conversion from `(Status, String)` into `Action::Redirect(status, url)`.
impl From<(Status, String)> for Action {
fn from(pair: (Status, String)) -> Action {
From::from((pair.0, pair.1.as_str()))
}
}
/// Conversion from `(&str, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<'a, T> From<(&'a str, T)> for Action where T: ToJson {
fn from(pair: (&'a str, T)) -> Action {
Action::Render(pair.0.to_string(), pair.1.to_json())
}
}
/// Conversion from `(String, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<T> From<(String, T)> for Action where T: ToJson {
fn from(pair: (String, T)) -> Action {
Action::Render(pair.0, pair.1.to_json())
}
}
/// Conversion from `Vec<u8>` into `Action::Send(bytes)`.
impl From<Vec<u8>> for Action {
fn from(bytes: Vec<u8>) -> Action {
Action::Send(bytes)
}
}
/// Conversion from `&str` into `Action::Send(bytes)`.
impl<'a> From<&'a str> for Action {
fn from(string: &'a str) -> Action {
Action::Send(string.as_bytes().to_vec())
}
}
/// Conversion from `String` into `Action::Send(bytes)`.
impl From<String> for Action {
fn from(string: String) -> Action {
Action::Send(string.into_bytes())
}
}
/// Conversion from `json::Value` into `Action::Send(bytes)`.
impl From<json::Value> for Action {
fn from(json: json::Value) -> Action {
From::from(json.to_string())
}
}
/// Wraps the given closure in a box and returns `Ok(Action::Stream(box))`.
///
/// The closure will be called with a writer implementing the `Write` trait
/// so that each call to `write` notifies the handler that data can be written
/// to the HTTP transport.
pub fn stream<F, T, R>(closure: F) -> Result where T: Any, F: 'static + Fn(&mut T, &mut Write) -> io::Result<R> {
Ok(Action::Stream(Box::new(move |any, writer| {
if let Some(app) = any.downcast_mut::<T>() {
if let Err(e) = closure(app, writer) {
error!("{}", e);
}
}
})))
}
/// This represents the response that will be sent back to the application.
///
/// Includes a status code (default 200 OK), headers, and a body.
/// The response can be updated and sent back immediately in a synchronous way,
/// or deferred pending some computation (asynchronous mode).
///
/// The response is sent when it is dropped.
pub struct Response {
pub status: Status,
pub headers: Headers,
streaming: bool
}
impl Response {
pub fn new() -> Response {
Response {
status: Status::Ok,
headers: Headers::default(),
streaming: false
}
}
/// Sets the status code of this response.
pub fn status(&mut self, status: Status) -> &mut Self {
self.status = status;
self
}
/// Sets the Content-Type header.
pub fn content_type<S: Into<Vec<u8>>>(&mut self, mime: S) -> &mut Self {
self.headers.set_raw("Content-Type", vec![mime.into()]);
self
}
/// Sets the Content-Length header.
pub fn len(&mut self, len: u64) -> &mut Self {
self.headers.set(header::ContentLength(len));
self
}
/// Sets the given cookie.
pub fn cookie(&mut self, cookie: Cookie) {
if self.headers.has::<SetCookie>() {
self.headers.get_mut::<SetCookie>().unwrap().push(cookie)
} else {
self.headers.set(SetCookie(vec![cookie]))
}
}
/// Sets the given header.
pub fn header<H: Header>(&mut self, header: H) -> &mut Self {
self.headers.set(header);
self
}
/// Sets the given header with raw strings.
pub fn header_raw<K: Into<Cow<'static, str>> + fmt::Debug, V: Into<Vec<u8>>>(&mut self, name: K, value: V) -> &mut Self {
self.headers.set_raw(name, vec![value.into()]);
self
}
/// Sets the Location header.
pub fn location<S: Into<String>>(&mut self, url: S) -> &mut Self {
self.headers.set(header::Location(url.into()));
self
}
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
/// If the file does not exist, this method sends a 404 Not Found response.
fn send_file<P: AsRef<Path>>(&mut self, path: P) -> Option<Vec<u8>> {
if !self.headers.has::<ContentType>() {
let extension = path.as_ref().extension();
if let Some(ext) = extension {
let content_type = match ext.to_string_lossy().as_ref() {
// application
"js" => Some(("application", "javascript", None)),
"m3u8" => Some(("application", "vnd.apple.mpegurl", None)),
"mpd" => Some(("application", "dash+xml", None)),
"xml" => Some(("application", "xml", None)),
// image
"gif" => Some(("image", "gif", None)),
"jpg" | "jpeg" => Some(("image", "jpeg", None)),
"png" => Some(("image", "png", None)),
// text
"css" => Some(("text", "css", None)),
"htm" | "html" => Some(("text", "html", Some((Attr::Charset, Value::Utf8)))),
"txt" => Some(("text", "plain", Some((Attr::Charset, Value::Utf8)))),
// video
"avi" => Some(("video", "x-msvideo", None)),
"mp4" => Some(("video", "mp4", None)),
"mpg" | "mpeg" => Some(("video", "mpeg", None)),
"ts" => Some(("video", "mp2t", None)),
_ => None
};
if let Some((top, sub, attr)) = content_type {
self.headers.set(ContentType(Mime(TopLevel::Ext(top.to_string()),
SubLevel::Ext(sub.to_string()),
match attr {
None => vec![],
Some(val) => vec![val]
}
)));
}
}
}
// read the whole file at once and send it
// probably not the best idea for big files, we should use stream instead in that case
match File::open(path) {
Ok(mut file) => {
let mut buf = Vec::with_capacity(file.metadata().ok().map_or(1024, |meta| meta.len() as usize));
if let Err(err) = file.read_to_end(&mut buf) {
self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
} else {
Some(buf)
}
},
Err(ref err) if err.kind() == ErrorKind::NotFound => {
self.status(Status::NotFound);
None
},
Err(ref err) => {
self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
}
}
}
}
pub fn send_file<P: AsRef<Path>>(response: &mut Response, path: P) -> Option<Vec<u8>> {
response.send_file(path)
}
pub fn set_streaming(response: &mut Response) {
response.streaming = true;
}
pub fn is_streaming(response: &Response) -> bool {
response.streaming
}
| cause | identifier_name |
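Aside: the identifier completed above is the cause() method of the error::Error impl in response.rs. Elsewhere in that file, Response::send_file() maps file extensions to Content-Type values; a rough Python rendering of the same lookup, kept in the dominant language of this dump and not part of either source file:

import os

CONTENT_TYPES = {
    "js": "application/javascript",
    "m3u8": "application/vnd.apple.mpegurl",
    "mpd": "application/dash+xml",
    "xml": "application/xml",
    "gif": "image/gif",
    "jpg": "image/jpeg",
    "jpeg": "image/jpeg",
    "png": "image/png",
    "css": "text/css",
    "htm": "text/html; charset=utf-8",
    "html": "text/html; charset=utf-8",
    "txt": "text/plain; charset=utf-8",
    "avi": "video/x-msvideo",
    "mp4": "video/mp4",
    "mpg": "video/mpeg",
    "mpeg": "video/mpeg",
    "ts": "video/mp2t",
}

def content_type_for(path):
    ext = os.path.splitext(path)[1].lstrip(".").lower()
    # None means "leave the Content-Type header unset", as send_file() does
    # for unknown extensions.
    return CONTENT_TYPES.get(ext)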
response.rs | use hyper::header::{self, CookiePair as Cookie, ContentType, Header, SetCookie};
use hyper::status::StatusCode as Status;
use hyper::Headers;
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use serde_json::value as json;
use serde_json::value::ToJson;
use std::any::Any;
use std::boxed::Box;
use std::borrow::Cow;
use std::{error, fmt, result};
use std::fs::File;
use std::io::{self, ErrorKind, Read, Write};
use std::path::Path;
/// Defines a handler error
#[derive(Debug)]
pub struct Error {
pub status: Status,
pub message: Option<Cow<'static, str>>
}
pub type Result = result::Result<Action, Error>;
impl Error {
fn new(status: Status, message: Option<Cow<'static, str>>) -> Error {
Error {
status: status,
message: message
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use std::error::Error;
self.description().fmt(f)
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match self.message {
None => "<no description available>",
Some(ref message) => &message
}
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl From<Status> for Error {
fn from(status: Status) -> Error {
Error::new(status, None)
}
}
impl From<(Status, &'static str)> for Error {
fn from(pair: (Status, &'static str)) -> Error {
Error::new(pair.0, Some(Cow::Borrowed(pair.1)))
}
}
impl From<(Status, String)> for Error {
fn from(pair: (Status, String)) -> Error {
Error::new(pair.0, Some(Cow::Owned(pair.1)))
}
}
/// Defines the action to be taken when returning from a handler
pub enum Action {
/// Ends the response with no body and the given status (if given).
///
/// If the status is not given, the status currently set on the response is used.
/// By default, a response has a status 200 OK.
End(Option<Status>),
/// Redirects to the given URL with a 3xx status (use 302 Found if unsure).
Redirect(Status, String),
/// Renders the template with the given name using the given JSON value.
///
/// If no Content-Type header is set, the content type is set to `text/html`.
Render(String, json::Value),
/// Sends the response with the given bytes as the body.
Send(Vec<u8>),
/// Carries a closure that is called with the application object and a writer for the response body.
Stream(Box<Fn(&mut Any, &mut Write)>),
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
/// If the file does not exist, a 404 Not Found response is sent.
SendFile(String)
}
/// Conversion from `()` into `End(None)`.
impl From<()> for Action {
fn from(_: ()) -> Action {
Action::End(None)
}
}
/// Conversion from `Status` into `End(Some(status))`.
impl From<Status> for Action {
fn from(status: Status) -> Action {
Action::End(Some(status))
}
}
/// Conversion from `(Status, &str)` into `Action::Redirect(status, url)`.
impl<'a> From<(Status, &'a str)> for Action {
fn from(pair: (Status, &'a str)) -> Action {
Action::Redirect(pair.0, pair.1.to_string())
}
}
/// Conversion from `(Status, String)` into `Action::Redirect(status, url)`.
impl From<(Status, String)> for Action {
fn from(pair: (Status, String)) -> Action {
From::from((pair.0, pair.1.as_str()))
}
}
/// Conversion from `(&str, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<'a, T> From<(&'a str, T)> for Action where T: ToJson {
fn from(pair: (&'a str, T)) -> Action {
Action::Render(pair.0.to_string(), pair.1.to_json())
}
}
/// Conversion from `(String, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<T> From<(String, T)> for Action where T: ToJson {
fn from(pair: (String, T)) -> Action {
Action::Render(pair.0, pair.1.to_json())
}
}
/// Conversion from `Vec<u8>` into `Action::Send(bytes)`.
impl From<Vec<u8>> for Action {
fn from(bytes: Vec<u8>) -> Action {
Action::Send(bytes)
}
}
/// Conversion from `&str` into `Action::Send(bytes)`.
impl<'a> From<&'a str> for Action {
fn from(string: &'a str) -> Action {
Action::Send(string.as_bytes().to_vec())
}
}
/// Conversion from `String` into `Action::Send(bytes)`.
impl From<String> for Action {
fn from(string: String) -> Action {
Action::Send(string.into_bytes())
}
}
/// Conversion from `json::Value` into `Action::Send(bytes)`.
impl From<json::Value> for Action {
fn from(json: json::Value) -> Action {
From::from(json.to_string())
}
}
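// Illustrative sketch: these conversions are what let handlers return plain
// values (unit, a status code, a string, JSON) and have them turned into an
// `Action` uniformly.
#[allow(dead_code)]
fn example_actions() -> (Action, Action, Action) {
    (
        Action::from(()),                // End(None)
        Action::from(Status::NoContent), // End(Some(Status::NoContent))
        Action::from("hello world")      // Send(b"hello world".to_vec())
    )
}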
/// Wraps the given closure in a box and returns `Ok(Action::Stream(box))`.
///
/// The closure will be called with a writer implementing the `Write` trait
/// so that each call to `write` notifies the handler that data can be written
/// to the HTTP transport.
pub fn stream<F, T, R>(closure: F) -> Result where T: Any, F: 'static + Fn(&mut T, &mut Write) -> io::Result<R> {
Ok(Action::Stream(Box::new(move |any, writer| {
if let Some(app) = any.downcast_mut::<T>() {
if let Err(e) = closure(app, writer) {
error!("{}", e);
}
}
})))
}
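// Illustrative usage sketch. `App` is a hypothetical application type: any
// `'static` type the framework stores behind `Any` works the same way, and the
// closure below streams the body chunk by chunk through the provided writer.
#[allow(dead_code)]
struct App {
    greeting: &'static str
}
#[allow(dead_code)]
fn example_stream_handler() -> Result {
    stream(|app: &mut App, writer: &mut Write| {
        // every successful write hands one chunk to the HTTP transport
        writer.write_all(app.greeting.as_bytes())
    })
}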
/// This represents the response that will be sent back to the application.
///
/// Includes a status code (default 200 OK), headers, and a body.
/// The response can be updated and sent back immediately in a synchronous way,
/// or deferred pending some computation (asynchronous mode).
///
/// The response is sent when it is dropped.
pub struct Response {
pub status: Status,
pub headers: Headers,
streaming: bool
}
impl Response {
pub fn new() -> Response {
Response {
status: Status::Ok,
headers: Headers::default(),
streaming: false
}
}
/// Sets the status code of this response.
pub fn status(&mut self, status: Status) -> &mut Self {
self.status = status;
self
}
/// Sets the Content-Type header.
pub fn content_type<S: Into<Vec<u8>>>(&mut self, mime: S) -> &mut Self {
self.headers.set_raw("Content-Type", vec![mime.into()]);
self
}
/// Sets the Content-Length header.
pub fn len(&mut self, len: u64) -> &mut Self {
self.headers.set(header::ContentLength(len));
self
}
/// Sets the given cookie.
pub fn cookie(&mut self, cookie: Cookie) {
if self.headers.has::<SetCookie>() {
self.headers.get_mut::<SetCookie>().unwrap().push(cookie)
} else {
self.headers.set(SetCookie(vec![cookie]))
}
}
/// Sets the given header.
pub fn header<H: Header>(&mut self, header: H) -> &mut Self {
self.headers.set(header);
self
}
/// Sets the given header with raw strings.
pub fn header_raw<K: Into<Cow<'static, str>> + fmt::Debug, V: Into<Vec<u8>>>(&mut self, name: K, value: V) -> &mut Self {
self.headers.set_raw(name, vec![value.into()]);
self
}
/// Sets the Location header.
pub fn location<S: Into<String>>(&mut self, url: S) -> &mut Self {
self.headers.set(header::Location(url.into()));
self
}
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
    ///
    /// If the file does not exist, this method sends a 404 Not Found response.
fn send_file<P: AsRef<Path>>(&mut self, path: P) -> Option<Vec<u8>> {
if !self.headers.has::<ContentType>() {
let extension = path.as_ref().extension();
if let Some(ext) = extension {
let content_type = match ext.to_string_lossy().as_ref() {
// application
"js" => Some(("application", "javascript", None)),
"m3u8" => Some(("application", "vnd.apple.mpegurl", None)),
"mpd" => Some(("application", "dash+xml", None)),
"xml" => Some(("application", "xml", None)),
// image
"gif" => Some(("image", "gif", None)),
"jpg" | "jpeg" => Some(("image", "jpeg", None)),
"png" => Some(("image", "png", None)),
// text
"css" => Some(("text", "css", None)),
"htm" | "html" => Some(("text", "html", Some((Attr::Charset, Value::Utf8)))),
"txt" => Some(("text", "plain", Some((Attr::Charset, Value::Utf8)))),
// video
"avi" => Some(("video", "x-msvideo", None)),
"mp4" => Some(("video", "mp4", None)),
"mpg" | "mpeg" => Some(("video", "mpeg", None)),
"ts" => Some(("video", "mp2t", None)),
_ => None
};
if let Some((top, sub, attr)) = content_type {
self.headers.set(ContentType(Mime(TopLevel::Ext(top.to_string()),
SubLevel::Ext(sub.to_string()),
match attr {
None => vec![],
Some(val) => vec![val]
}
)));
}
}
}
// read the whole file at once and send it
// probably not the best idea for big files, we should use stream instead in that case
match File::open(path) {
Ok(mut file) => {
let mut buf = Vec::with_capacity(file.metadata().ok().map_or(1024, |meta| meta.len() as usize));
if let Err(err) = file.read_to_end(&mut buf) {
self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
} else {
Some(buf)
}
},
Err(ref err) if err.kind() == ErrorKind::NotFound => {
self.status(Status::NotFound);
None
},
Err(ref err) => {
self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
}
}
}
}
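// Illustrative sketch: the setters above return `&mut Self`, so a handler can
// chain them before the body is produced.
#[allow(dead_code)]
fn example_json_response() -> Response {
    let mut response = Response::new();
    response.status(Status::Ok)
        .header(ContentType(Mime(TopLevel::Application, SubLevel::Json, vec![])));
    response
}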
pub fn send_file<P: AsRef<Path>>(response: &mut Response, path: P) -> Option<Vec<u8>> {
response.send_file(path)
}
pub fn set_streaming(response: &mut Response) {
response.streaming = true;
}
pub fn is_streaming(response: &Response) -> bool | {
response.streaming
} | identifier_body |
|
response.rs | use hyper::header::{self, CookiePair as Cookie, ContentType, Header, SetCookie};
use hyper::status::StatusCode as Status;
use hyper::Headers;
use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value};
use serde_json::value as json;
use serde_json::value::ToJson;
use std::any::Any;
use std::boxed::Box;
use std::borrow::Cow;
use std::{error, fmt, result};
use std::fs::File;
use std::io::{self, ErrorKind, Read, Write};
use std::path::Path;
/// Defines a handler error
#[derive(Debug)]
pub struct Error {
pub status: Status,
pub message: Option<Cow<'static, str>>
}
pub type Result = result::Result<Action, Error>;
impl Error {
fn new(status: Status, message: Option<Cow<'static, str>>) -> Error {
Error {
status: status,
message: message
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use std::error::Error;
self.description().fmt(f)
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match self.message {
None => "<no description available>",
Some(ref message) => &message
}
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl From<Status> for Error {
fn from(status: Status) -> Error {
Error::new(status, None)
}
}
impl From<(Status, &'static str)> for Error {
fn from(pair: (Status, &'static str)) -> Error {
Error::new(pair.0, Some(Cow::Borrowed(pair.1)))
}
}
impl From<(Status, String)> for Error {
fn from(pair: (Status, String)) -> Error {
Error::new(pair.0, Some(Cow::Owned(pair.1)))
}
}
/// Defines the action to be taken when returning from a handler
pub enum Action {
/// Ends the response with no body and the given status (if given).
///
/// If the status is not given, the status currently set on the response is used.
/// By default, a response has a status 200 OK.
End(Option<Status>),
/// Redirects to the given URL with a 3xx status (use 302 Found if unsure).
Redirect(Status, String),
/// Renders the template with the given name using the given JSON value.
///
/// If no Content-Type header is set, the content type is set to `text/html`.
Render(String, json::Value),
/// Sends the response with the given bytes as the body.
Send(Vec<u8>),
    /// Streams the response body: the boxed closure is called with a writer to produce it.
Stream(Box<Fn(&mut Any, &mut Write)>),
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
    ///
    /// If the file does not exist, a 404 Not Found response is sent.
SendFile(String)
}
/// Conversion from `()` into `End(None)`.
impl From<()> for Action {
fn from(_: ()) -> Action {
Action::End(None)
}
}
/// Conversion from `Status` into `End(Some(status))`.
impl From<Status> for Action {
fn from(status: Status) -> Action {
Action::End(Some(status))
}
}
/// Conversion from `(Status, &str)` into `Action::Redirect(status, url)`.
impl<'a> From<(Status, &'a str)> for Action {
fn from(pair: (Status, &'a str)) -> Action {
Action::Redirect(pair.0, pair.1.to_string())
}
}
/// Conversion from `(Status, String)` into `Action::Redirect(status, url)`.
impl From<(Status, String)> for Action {
fn from(pair: (Status, String)) -> Action {
From::from((pair.0, pair.1.as_str()))
}
}
/// Conversion from `(&str, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<'a, T> From<(&'a str, T)> for Action where T: ToJson {
fn from(pair: (&'a str, T)) -> Action {
Action::Render(pair.0.to_string(), pair.1.to_json())
}
}
/// Conversion from `(String, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<T> From<(String, T)> for Action where T: ToJson {
fn from(pair: (String, T)) -> Action {
Action::Render(pair.0, pair.1.to_json())
}
}
/// Conversion from `Vec<u8>` into `Action::Send(bytes)`.
impl From<Vec<u8>> for Action {
fn from(bytes: Vec<u8>) -> Action {
Action::Send(bytes)
}
}
/// Conversion from `&str` into `Action::Send(bytes)`.
impl<'a> From<&'a str> for Action {
fn from(string: &'a str) -> Action {
Action::Send(string.as_bytes().to_vec())
}
}
/// Conversion from `String` into `Action::Send(bytes)`.
impl From<String> for Action {
fn from(string: String) -> Action {
Action::Send(string.into_bytes())
}
}
/// Conversion from `json::Value` into `Action::Send(bytes)`.
impl From<json::Value> for Action {
fn from(json: json::Value) -> Action {
From::from(json.to_string())
}
}
/// Wraps the given closure in a box and returns `Ok(Action::Stream(box))`.
///
/// The closure will be called with a writer implementing the `Write` trait
/// so that each call to `write` notifies the handler that data can be written
/// to the HTTP transport.
pub fn stream<F, T, R>(closure: F) -> Result where T: Any, F: 'static + Fn(&mut T, &mut Write) -> io::Result<R> {
Ok(Action::Stream(Box::new(move |any, writer| {
if let Some(app) = any.downcast_mut::<T>() {
if let Err(e) = closure(app, writer) {
error!("{}", e);
}
}
})))
}
/// This represents the response that will be sent back to the application.
///
/// Includes a status code (default 200 OK), headers, and a body.
/// The response can be updated and sent back immediately in a synchronous way,
/// or deferred pending some computation (asynchronous mode).
///
/// The response is sent when it is dropped.
pub struct Response {
pub status: Status,
pub headers: Headers,
streaming: bool
}
impl Response {
pub fn new() -> Response {
Response {
status: Status::Ok,
headers: Headers::default(),
streaming: false
}
}
/// Sets the status code of this response.
pub fn status(&mut self, status: Status) -> &mut Self {
self.status = status;
self
}
/// Sets the Content-Type header.
pub fn content_type<S: Into<Vec<u8>>>(&mut self, mime: S) -> &mut Self {
self.headers.set_raw("Content-Type", vec![mime.into()]);
self
}
/// Sets the Content-Length header.
pub fn len(&mut self, len: u64) -> &mut Self {
self.headers.set(header::ContentLength(len));
self
}
/// Sets the given cookie.
pub fn cookie(&mut self, cookie: Cookie) {
if self.headers.has::<SetCookie>() {
self.headers.get_mut::<SetCookie>().unwrap().push(cookie)
} else {
self.headers.set(SetCookie(vec![cookie]))
}
}
/// Sets the given header.
pub fn header<H: Header>(&mut self, header: H) -> &mut Self {
self.headers.set(header);
self
}
/// Sets the given header with raw strings.
pub fn header_raw<K: Into<Cow<'static, str>> + fmt::Debug, V: Into<Vec<u8>>>(&mut self, name: K, value: V) -> &mut Self {
self.headers.set_raw(name, vec![value.into()]);
self
}
/// Sets the Location header.
pub fn location<S: Into<String>>(&mut self, url: S) -> &mut Self {
self.headers.set(header::Location(url.into()));
self
}
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
    ///
    /// If the file does not exist, this method sends a 404 Not Found response.
fn send_file<P: AsRef<Path>>(&mut self, path: P) -> Option<Vec<u8>> {
if !self.headers.has::<ContentType>() {
let extension = path.as_ref().extension();
if let Some(ext) = extension {
let content_type = match ext.to_string_lossy().as_ref() {
// application
"js" => Some(("application", "javascript", None)),
"m3u8" => Some(("application", "vnd.apple.mpegurl", None)),
"mpd" => Some(("application", "dash+xml", None)),
"xml" => Some(("application", "xml", None)),
// image
"gif" => Some(("image", "gif", None)),
"jpg" | "jpeg" => Some(("image", "jpeg", None)),
"png" => Some(("image", "png", None)),
// text
"css" => Some(("text", "css", None)),
"htm" | "html" => Some(("text", "html", Some((Attr::Charset, Value::Utf8)))),
"txt" => Some(("text", "plain", Some((Attr::Charset, Value::Utf8)))),
// video
"avi" => Some(("video", "x-msvideo", None)),
"mp4" => Some(("video", "mp4", None)),
"mpg" | "mpeg" => Some(("video", "mpeg", None)),
"ts" => Some(("video", "mp2t", None)),
_ => None
};
if let Some((top, sub, attr)) = content_type {
self.headers.set(ContentType(Mime(TopLevel::Ext(top.to_string()),
SubLevel::Ext(sub.to_string()),
match attr {
None => vec![],
Some(val) => vec![val]
}
)));
}
}
}
// read the whole file at once and send it | self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
} else {
Some(buf)
}
},
Err(ref err) if err.kind() == ErrorKind::NotFound => {
self.status(Status::NotFound);
None
},
Err(ref err) => {
self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
}
}
}
}
pub fn send_file<P: AsRef<Path>>(response: &mut Response, path: P) -> Option<Vec<u8>> {
response.send_file(path)
}
pub fn set_streaming(response: &mut Response) {
response.streaming = true;
}
pub fn is_streaming(response: &Response) -> bool {
response.streaming
} | // probably not the best idea for big files, we should use stream instead in that case
match File::open(path) {
Ok(mut file) => {
let mut buf = Vec::with_capacity(file.metadata().ok().map_or(1024, |meta| meta.len() as usize));
if let Err(err) = file.read_to_end(&mut buf) { | random_line_split |
node.go | // Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package node
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"text/template"
"time"
"emperror.dev/errors"
"github.com/banzaicloud/pke/cmd/pke/app/config"
"github.com/banzaicloud/pke/cmd/pke/app/constants"
"github.com/banzaicloud/pke/cmd/pke/app/phases"
"github.com/banzaicloud/pke/cmd/pke/app/phases/kubeadm"
"github.com/banzaicloud/pke/cmd/pke/app/util/file"
"github.com/banzaicloud/pke/cmd/pke/app/util/flags"
"github.com/banzaicloud/pke/cmd/pke/app/util/linux"
pipelineutil "github.com/banzaicloud/pke/cmd/pke/app/util/pipeline"
"github.com/banzaicloud/pke/cmd/pke/app/util/runner"
"github.com/banzaicloud/pke/cmd/pke/app/util/validator"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
use = "kubernetes-node"
short = "Kubernetes worker node installation"
cmdKubeadm = "kubeadm"
kubeProxyConfig = "/var/lib/kube-proxy/config.conf"
kubeadmConfig = "/etc/kubernetes/kubeadm.conf"
kubeadmAmazonConfig = "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
config config.Config
kubernetesVersion string
containerRuntime string
advertiseAddress string
apiServerHostPort string
kubeadmToken string
caCertHash string
ResetOnFailure bool
podNetworkCIDR string
cloudProvider string
nodepool string
azureTenantID string
azureSubnetName string
azureSecurityGroupName string
azureVNetName string
azureVNetResourceGroup string
azureVMType string
azureLoadBalancerSku string
azureRouteTableName string
taints []string
labels []string
}
func NewCommand(config config.Config) *cobra.Command {
return phases.NewCommand(&Node{config: config})
}
func (n *Node) Use() string {
return use
}
func (n *Node) Short() string { |
func (n *Node) RegisterFlags(flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.String(constants.FlagPodNetworkCIDR, "", "range of IP addresses for the pod network on the current node")
// Pipeline
flags.StringP(constants.FlagPipelineAPIEndpoint, constants.FlagPipelineAPIEndpointShort, "", "Pipeline API server url")
flags.StringP(constants.FlagPipelineAPIToken, constants.FlagPipelineAPITokenShort, "", "Token for accessing Pipeline API")
flags.Bool(constants.FlagPipelineAPIInsecure, false, "If the Pipeline API should not verify the API's certificate")
flags.Int32(constants.FlagPipelineOrganizationID, 0, "Organization ID to use with Pipeline API")
flags.Int32(constants.FlagPipelineClusterID, 0, "Cluster ID to use with Pipeline API")
// Kubernetes cloud provider (optional)
flags.String(constants.FlagCloudProvider, "", "cloud provider. example: aws")
// Control Plane
flags.String(constants.FlagAdvertiseAddress, "", "Kubernetes API Server advertise address")
_ = flags.MarkHidden(constants.FlagAdvertiseAddress)
// Kubernetes cluster join parameters
flags.String(constants.FlagAPIServerHostPort, "", "Kubernetes API Server host port")
flags.String(constants.FlagKubeadmToken, "", "PKE join token")
flags.String(constants.FlagCACertHash, "", "CA cert hash")
flags.Bool(constants.FlagResetOnFailure, false, "Roll back changes after failures")
// Pipeline nodepool name (optional)
flags.String(constants.FlagPipelineNodepool, "", "name of the nodepool the node belongs to")
// Azure cloud
flags.String(constants.FlagAzureTenantID, "", "The AAD Tenant ID for the Subscription that the cluster is deployed in")
flags.String(constants.FlagAzureSubnetName, "", "The name of the subnet that the cluster is deployed in")
flags.String(constants.FlagAzureSecurityGroupName, "", "The name of the security group attached to the cluster's subnet")
flags.String(constants.FlagAzureVNetName, "", "The name of the VNet that the cluster is deployed in")
flags.String(constants.FlagAzureVNetResourceGroup, "", "The name of the resource group that the Vnet is deployed in")
flags.String(constants.FlagAzureVMType, "standard", "The type of azure nodes. Candidate values are: vmss and standard")
flags.String(constants.FlagAzureLoadBalancerSku, "basic", "Sku of Load Balancer and Public IP. Candidate values are: basic and standard")
flags.String(constants.FlagAzureRouteTableName, "kubernetes-routes", "The name of the route table attached to the subnet that the cluster is deployed in")
// Taints
flags.StringSlice(constants.FlagTaints, nil, "Specifies the taints the Node should be registered with")
// Labels
flags.StringSlice(constants.FlagLabels, nil, "Specifies the labels the Node should be registered with")
}
func (n *Node) Validate(cmd *cobra.Command) error {
if err := n.workerBootstrapParameters(cmd); err != nil {
return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: n.caCertHash,
}); err != nil {
return err
}
// Azure specific required flags
if n.cloudProvider == constants.CloudProviderAzure {
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagAzureTenantID: n.azureTenantID,
constants.FlagAzureSubnetName: n.azureSubnetName,
constants.FlagAzureSecurityGroupName: n.azureSecurityGroupName,
constants.FlagAzureVNetName: n.azureVNetName,
constants.FlagAzureVNetResourceGroup: n.azureVNetResourceGroup,
constants.FlagAzureVMType: n.azureVMType,
constants.FlagAzureLoadBalancerSku: n.azureLoadBalancerSku,
constants.FlagAzureRouteTableName: n.azureRouteTableName,
}); err != nil {
return err
}
}
switch n.containerRuntime {
case constants.ContainerRuntimeContainerd,
constants.ContainerRuntimeDocker:
// break
default:
return errors.Wrapf(constants.ErrUnsupportedContainerRuntime, "container runtime: %s", n.containerRuntime)
}
flags.PrintFlags(cmd.OutOrStdout(), n.Use(), cmd.Flags())
return nil
}
func (n *Node) Run(out io.Writer) error {
_, _ = fmt.Fprintf(out, "[%s] running\n", n.Use())
if err := n.install(out); err != nil {
if n.ResetOnFailure {
if rErr := kubeadm.Reset(out, n.containerRuntime); rErr != nil {
_, _ = fmt.Fprintf(out, "%v\n", rErr)
}
}
return err
}
return nil
}
func (n *Node) workerBootstrapParameters(cmd *cobra.Command) (err error) {
n.kubernetesVersion, err = cmd.Flags().GetString(constants.FlagKubernetesVersion)
if err != nil {
return
}
n.containerRuntime, err = cmd.Flags().GetString(constants.FlagContainerRuntime)
if err != nil {
return
}
// Override values with flags
n.advertiseAddress, err = cmd.Flags().GetString(constants.FlagAdvertiseAddress)
if err != nil {
return
}
n.apiServerHostPort, err = cmd.Flags().GetString(constants.FlagAPIServerHostPort)
if err != nil {
return
}
n.kubeadmToken, err = cmd.Flags().GetString(constants.FlagKubeadmToken)
if err != nil {
return
}
n.caCertHash, err = cmd.Flags().GetString(constants.FlagCACertHash)
if err != nil {
return
}
n.ResetOnFailure, err = cmd.Flags().GetBool(constants.FlagResetOnFailure)
if err != nil {
return
}
if n.kubeadmToken == "" && n.caCertHash == "" {
n.apiServerHostPort, n.kubeadmToken, n.caCertHash, err = pipelineutil.NodeJoinArgs(os.Stdout, cmd)
if err != nil {
return
}
}
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.azureTenantID, err = cmd.Flags().GetString(constants.FlagAzureTenantID)
if err != nil {
return
}
n.azureSubnetName, err = cmd.Flags().GetString(constants.FlagAzureSubnetName)
if err != nil {
return
}
n.azureSecurityGroupName, err = cmd.Flags().GetString(constants.FlagAzureSecurityGroupName)
if err != nil {
return
}
n.azureVNetName, err = cmd.Flags().GetString(constants.FlagAzureVNetName)
if err != nil {
return
}
n.azureVNetResourceGroup, err = cmd.Flags().GetString(constants.FlagAzureVNetResourceGroup)
if err != nil {
return
}
n.azureVMType, err = cmd.Flags().GetString(constants.FlagAzureVMType)
if err != nil {
return
}
n.azureLoadBalancerSku, err = cmd.Flags().GetString(constants.FlagAzureLoadBalancerSku)
if err != nil {
return
}
n.azureRouteTableName, err = cmd.Flags().GetString(constants.FlagAzureRouteTableName)
if err != nil {
return
}
n.taints, err = cmd.Flags().GetStringSlice(constants.FlagTaints)
if err != nil {
return
}
n.labels, err = cmd.Flags().GetStringSlice(constants.FlagLabels)
return
}
func (n *Node) install(out io.Writer) error {
// write kubeadm config
if err := n.writeKubeadmConfig(out, kubeadmConfig); err != nil {
return err
}
err := writeKubeProxyConfig(out, kubeProxyConfig)
if err != nil {
return err
}
// write kubeadm aws.conf
err = kubeadm.WriteKubeadmAmazonConfig(out, kubeadmAmazonConfig, n.cloudProvider)
if err != nil {
return err
}
// write kubeadm azure.conf
err = kubeadm.WriteKubeadmAzureConfig(out, kubeadmAzureConfig, n.cloudProvider, n.azureTenantID, n.azureSubnetName, n.azureSecurityGroupName, n.azureVNetName, n.azureVNetResourceGroup, n.azureVMType, n.azureLoadBalancerSku, n.azureRouteTableName, true)
if err != nil {
return err
}
// create cni directory
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, cniDir)
if err := os.MkdirAll(cniDir, 0755); err != nil {
return err
}
// CNI network bridge
if err := writeCNIBridge(out, n.cloudProvider, n.podNetworkCIDR, cniBridgeConfig); err != nil {
return err
}
// CNI network loopback
if err := writeCNILoopback(out, n.cloudProvider, cniLoopbackConfig); err != nil {
return err
}
for i := 0; i < maxJoinRetries; i++ {
var ll string
// kubeadm join 10.240.0.11:6443 --token 0uk28q.e5i6ewi7xb0g8ye9 --discovery-token-ca-cert-hash sha256:a1a74c00ecccf947b69b49172390018096affbbae25447c4bd0c0906273c1482 --cri-socket=unix:///run/containerd/containerd.sock
ll, err = runner.Cmd(out, cmdKubeadm, "join", "--config="+kubeadmConfig).CombinedOutputAsync()
if err == nil {
break
}
// re-run command on connection refused error
// couldn't validate the identity of the API Server: abort connecting to API servers after timeout of 5m0s
if !strings.Contains(ll, "connection refused") && !strings.Contains(ll, "timeout") {
return err
}
_, _ = fmt.Fprintf(out, "[%s] re-run %q command\n", use, cmdKubeadm)
time.Sleep(time.Second)
}
if err != nil {
return err
}
return linux.SystemctlEnableAndStart(out, "kubelet")
}
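// Illustrative sketch of the retry policy used by install() above: kubeadm
// join is retried only while its combined output looks transient ("connection
// refused" or "timeout"); any other failure is returned immediately.
func retryTransient(out io.Writer, attempts int, run func() (string, error)) error {
	var err error
	for i := 0; i < attempts; i++ {
		var output string
		output, err = run()
		if err == nil {
			return nil
		}
		if !strings.Contains(output, "connection refused") && !strings.Contains(output, "timeout") {
			// permanent failure, give up immediately
			return err
		}
		_, _ = fmt.Fprintf(out, "[%s] transient failure, retrying: %v\n", use, err)
		time.Sleep(time.Second)
	}
	return err
}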
//go:generate templify -t ${GOTMPL} -p node -f kubeProxyConfig kube_proxy_config.yaml.tmpl
func writeKubeProxyConfig(out io.Writer, filename string) error {
dir := filepath.Dir(filename)
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, dir)
err := os.MkdirAll(dir, 0750)
if err != nil {
return err
}
return file.Overwrite(filename, kubeProxyConfigTemplate())
}
//go:generate templify -t ${GOTMPL} -p node -f cniBridge cni_bridge.json.tmpl
func writeCNIBridge(out io.Writer, cloudProvider, podNetworkCIDR, filename string) error {
if cloudProvider != constants.CloudProviderAzure || podNetworkCIDR == "" {
return nil
}
tmpl, err := template.New("cni-bridge").Parse(cniBridgeTemplate())
if err != nil {
return err
}
type data struct {
PodNetworkCIDR string
}
d := data{
PodNetworkCIDR: podNetworkCIDR,
}
return file.WriteTemplate(filename, tmpl, d)
}
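// Hedged illustration: the real bridge configuration comes from the generated
// cni_bridge.json.tmpl (cniBridgeTemplate()); the toy template below is only a
// stand-in that demonstrates the {{.PodNetworkCIDR}} substitution performed by
// writeCNIBridge.
func exampleRenderPodCIDR(w io.Writer, podNetworkCIDR string) error {
	toy := `{"ipam": {"subnet": "{{.PodNetworkCIDR}}"}}` // illustrative only, not the real template
	tmpl, err := template.New("cni-bridge-example").Parse(toy)
	if err != nil {
		return err
	}
	return tmpl.Execute(w, struct{ PodNetworkCIDR string }{PodNetworkCIDR: podNetworkCIDR})
}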
//go:generate templify -t ${GOTMPL} -p node -f cniLoopback cni_loopback.json.tmpl
func writeCNILoopback(out io.Writer, cloudProvider, filename string) error {
if cloudProvider != constants.CloudProviderAzure {
return nil
}
return file.Overwrite(filename, cniLoopbackTemplate())
}
|
return short
}
| identifier_body |
node.go | // Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package node
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"text/template"
"time"
"emperror.dev/errors"
"github.com/banzaicloud/pke/cmd/pke/app/config"
"github.com/banzaicloud/pke/cmd/pke/app/constants"
"github.com/banzaicloud/pke/cmd/pke/app/phases"
"github.com/banzaicloud/pke/cmd/pke/app/phases/kubeadm"
"github.com/banzaicloud/pke/cmd/pke/app/util/file"
"github.com/banzaicloud/pke/cmd/pke/app/util/flags"
"github.com/banzaicloud/pke/cmd/pke/app/util/linux"
pipelineutil "github.com/banzaicloud/pke/cmd/pke/app/util/pipeline"
"github.com/banzaicloud/pke/cmd/pke/app/util/runner"
"github.com/banzaicloud/pke/cmd/pke/app/util/validator"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
use = "kubernetes-node"
short = "Kubernetes worker node installation"
cmdKubeadm = "kubeadm"
kubeProxyConfig = "/var/lib/kube-proxy/config.conf"
kubeadmConfig = "/etc/kubernetes/kubeadm.conf"
kubeadmAmazonConfig = "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
config config.Config
kubernetesVersion string
containerRuntime string
advertiseAddress string
apiServerHostPort string
kubeadmToken string
caCertHash string
ResetOnFailure bool
podNetworkCIDR string
cloudProvider string
nodepool string
azureTenantID string
azureSubnetName string
azureSecurityGroupName string
azureVNetName string
azureVNetResourceGroup string
azureVMType string
azureLoadBalancerSku string
azureRouteTableName string
taints []string
labels []string
}
func NewCommand(config config.Config) *cobra.Command {
return phases.NewCommand(&Node{config: config})
}
func (n *Node) Use() string {
return use
}
func (n *Node) Short() string {
return short
}
func (n *Node) RegisterFlags(flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.String(constants.FlagPodNetworkCIDR, "", "range of IP addresses for the pod network on the current node")
// Pipeline
flags.StringP(constants.FlagPipelineAPIEndpoint, constants.FlagPipelineAPIEndpointShort, "", "Pipeline API server url")
flags.StringP(constants.FlagPipelineAPIToken, constants.FlagPipelineAPITokenShort, "", "Token for accessing Pipeline API")
flags.Bool(constants.FlagPipelineAPIInsecure, false, "If the Pipeline API should not verify the API's certificate")
flags.Int32(constants.FlagPipelineOrganizationID, 0, "Organization ID to use with Pipeline API")
flags.Int32(constants.FlagPipelineClusterID, 0, "Cluster ID to use with Pipeline API")
// Kubernetes cloud provider (optional)
flags.String(constants.FlagCloudProvider, "", "cloud provider. example: aws")
// Control Plane
flags.String(constants.FlagAdvertiseAddress, "", "Kubernetes API Server advertise address")
_ = flags.MarkHidden(constants.FlagAdvertiseAddress)
// Kubernetes cluster join parameters
flags.String(constants.FlagAPIServerHostPort, "", "Kubernetes API Server host port")
flags.String(constants.FlagKubeadmToken, "", "PKE join token")
flags.String(constants.FlagCACertHash, "", "CA cert hash")
flags.Bool(constants.FlagResetOnFailure, false, "Roll back changes after failures")
// Pipeline nodepool name (optional)
flags.String(constants.FlagPipelineNodepool, "", "name of the nodepool the node belongs to")
// Azure cloud
flags.String(constants.FlagAzureTenantID, "", "The AAD Tenant ID for the Subscription that the cluster is deployed in")
flags.String(constants.FlagAzureSubnetName, "", "The name of the subnet that the cluster is deployed in")
flags.String(constants.FlagAzureSecurityGroupName, "", "The name of the security group attached to the cluster's subnet")
flags.String(constants.FlagAzureVNetName, "", "The name of the VNet that the cluster is deployed in")
flags.String(constants.FlagAzureVNetResourceGroup, "", "The name of the resource group that the Vnet is deployed in")
flags.String(constants.FlagAzureVMType, "standard", "The type of azure nodes. Candidate values are: vmss and standard")
flags.String(constants.FlagAzureLoadBalancerSku, "basic", "Sku of Load Balancer and Public IP. Candidate values are: basic and standard")
flags.String(constants.FlagAzureRouteTableName, "kubernetes-routes", "The name of the route table attached to the subnet that the cluster is deployed in")
// Taints
flags.StringSlice(constants.FlagTaints, nil, "Specifies the taints the Node should be registered with")
// Labels
flags.StringSlice(constants.FlagLabels, nil, "Specifies the labels the Node should be registered with")
}
func (n *Node) Validate(cmd *cobra.Command) error {
if err := n.workerBootstrapParameters(cmd); err != nil {
return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: n.caCertHash,
}); err != nil {
return err
}
// Azure specific required flags
if n.cloudProvider == constants.CloudProviderAzure {
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagAzureTenantID: n.azureTenantID,
constants.FlagAzureSubnetName: n.azureSubnetName,
constants.FlagAzureSecurityGroupName: n.azureSecurityGroupName,
constants.FlagAzureVNetName: n.azureVNetName,
constants.FlagAzureVNetResourceGroup: n.azureVNetResourceGroup,
constants.FlagAzureVMType: n.azureVMType,
constants.FlagAzureLoadBalancerSku: n.azureLoadBalancerSku,
constants.FlagAzureRouteTableName: n.azureRouteTableName,
}); err != nil {
return err
}
}
switch n.containerRuntime {
case constants.ContainerRuntimeContainerd,
constants.ContainerRuntimeDocker:
// break
default:
return errors.Wrapf(constants.ErrUnsupportedContainerRuntime, "container runtime: %s", n.containerRuntime)
}
flags.PrintFlags(cmd.OutOrStdout(), n.Use(), cmd.Flags())
return nil
}
func (n *Node) Run(out io.Writer) error {
_, _ = fmt.Fprintf(out, "[%s] running\n", n.Use())
if err := n.install(out); err != nil {
if n.ResetOnFailure {
if rErr := kubeadm.Reset(out, n.containerRuntime); rErr != nil {
_, _ = fmt.Fprintf(out, "%v\n", rErr)
}
}
return err
}
return nil
}
func (n *Node) workerBootstrapParameters(cmd *cobra.Command) (err error) {
n.kubernetesVersion, err = cmd.Flags().GetString(constants.FlagKubernetesVersion)
if err != nil {
return
}
n.containerRuntime, err = cmd.Flags().GetString(constants.FlagContainerRuntime)
if err != nil {
return
}
// Override values with flags
n.advertiseAddress, err = cmd.Flags().GetString(constants.FlagAdvertiseAddress)
if err != nil {
return
}
n.apiServerHostPort, err = cmd.Flags().GetString(constants.FlagAPIServerHostPort)
if err != nil {
return
}
n.kubeadmToken, err = cmd.Flags().GetString(constants.FlagKubeadmToken)
if err != nil {
return
}
n.caCertHash, err = cmd.Flags().GetString(constants.FlagCACertHash)
if err != nil {
return
}
n.ResetOnFailure, err = cmd.Flags().GetBool(constants.FlagResetOnFailure)
if err != nil {
return
}
if n.kubeadmToken == "" && n.caCertHash == "" {
n.apiServerHostPort, n.kubeadmToken, n.caCertHash, err = pipelineutil.NodeJoinArgs(os.Stdout, cmd)
if err != nil { | }
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.azureTenantID, err = cmd.Flags().GetString(constants.FlagAzureTenantID)
if err != nil {
return
}
n.azureSubnetName, err = cmd.Flags().GetString(constants.FlagAzureSubnetName)
if err != nil {
return
}
n.azureSecurityGroupName, err = cmd.Flags().GetString(constants.FlagAzureSecurityGroupName)
if err != nil {
return
}
n.azureVNetName, err = cmd.Flags().GetString(constants.FlagAzureVNetName)
if err != nil {
return
}
n.azureVNetResourceGroup, err = cmd.Flags().GetString(constants.FlagAzureVNetResourceGroup)
if err != nil {
return
}
n.azureVMType, err = cmd.Flags().GetString(constants.FlagAzureVMType)
if err != nil {
return
}
n.azureLoadBalancerSku, err = cmd.Flags().GetString(constants.FlagAzureLoadBalancerSku)
if err != nil {
return
}
n.azureRouteTableName, err = cmd.Flags().GetString(constants.FlagAzureRouteTableName)
if err != nil {
return
}
n.taints, err = cmd.Flags().GetStringSlice(constants.FlagTaints)
if err != nil {
return
}
n.labels, err = cmd.Flags().GetStringSlice(constants.FlagLabels)
return
}
func (n *Node) install(out io.Writer) error {
// write kubeadm config
if err := n.writeKubeadmConfig(out, kubeadmConfig); err != nil {
return err
}
err := writeKubeProxyConfig(out, kubeProxyConfig)
if err != nil {
return err
}
// write kubeadm aws.conf
err = kubeadm.WriteKubeadmAmazonConfig(out, kubeadmAmazonConfig, n.cloudProvider)
if err != nil {
return err
}
// write kubeadm azure.conf
err = kubeadm.WriteKubeadmAzureConfig(out, kubeadmAzureConfig, n.cloudProvider, n.azureTenantID, n.azureSubnetName, n.azureSecurityGroupName, n.azureVNetName, n.azureVNetResourceGroup, n.azureVMType, n.azureLoadBalancerSku, n.azureRouteTableName, true)
if err != nil {
return err
}
// create cni directory
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, cniDir)
if err := os.MkdirAll(cniDir, 0755); err != nil {
return err
}
// CNI network bridge
if err := writeCNIBridge(out, n.cloudProvider, n.podNetworkCIDR, cniBridgeConfig); err != nil {
return err
}
// CNI network loopback
if err := writeCNILoopback(out, n.cloudProvider, cniLoopbackConfig); err != nil {
return err
}
for i := 0; i < maxJoinRetries; i++ {
var ll string
// kubeadm join 10.240.0.11:6443 --token 0uk28q.e5i6ewi7xb0g8ye9 --discovery-token-ca-cert-hash sha256:a1a74c00ecccf947b69b49172390018096affbbae25447c4bd0c0906273c1482 --cri-socket=unix:///run/containerd/containerd.sock
ll, err = runner.Cmd(out, cmdKubeadm, "join", "--config="+kubeadmConfig).CombinedOutputAsync()
if err == nil {
break
}
// re-run command on connection refused error
// couldn't validate the identity of the API Server: abort connecting to API servers after timeout of 5m0s
if !strings.Contains(ll, "connection refused") && !strings.Contains(ll, "timeout") {
return err
}
_, _ = fmt.Fprintf(out, "[%s] re-run %q command\n", use, cmdKubeadm)
time.Sleep(time.Second)
}
if err != nil {
return err
}
return linux.SystemctlEnableAndStart(out, "kubelet")
}
//go:generate templify -t ${GOTMPL} -p node -f kubeProxyConfig kube_proxy_config.yaml.tmpl
func writeKubeProxyConfig(out io.Writer, filename string) error {
dir := filepath.Dir(filename)
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, dir)
err := os.MkdirAll(dir, 0750)
if err != nil {
return err
}
return file.Overwrite(filename, kubeProxyConfigTemplate())
}
//go:generate templify -t ${GOTMPL} -p node -f cniBridge cni_bridge.json.tmpl
func writeCNIBridge(out io.Writer, cloudProvider, podNetworkCIDR, filename string) error {
if cloudProvider != constants.CloudProviderAzure || podNetworkCIDR == "" {
return nil
}
tmpl, err := template.New("cni-bridge").Parse(cniBridgeTemplate())
if err != nil {
return err
}
type data struct {
PodNetworkCIDR string
}
d := data{
PodNetworkCIDR: podNetworkCIDR,
}
return file.WriteTemplate(filename, tmpl, d)
}
//go:generate templify -t ${GOTMPL} -p node -f cniLoopback cni_loopback.json.tmpl
func writeCNILoopback(out io.Writer, cloudProvider, filename string) error {
if cloudProvider != constants.CloudProviderAzure {
return nil
}
return file.Overwrite(filename, cniLoopbackTemplate())
}
|
return
}
| conditional_block |
node.go | // Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package node
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"text/template"
"time"
"emperror.dev/errors"
"github.com/banzaicloud/pke/cmd/pke/app/config"
"github.com/banzaicloud/pke/cmd/pke/app/constants"
"github.com/banzaicloud/pke/cmd/pke/app/phases"
"github.com/banzaicloud/pke/cmd/pke/app/phases/kubeadm"
"github.com/banzaicloud/pke/cmd/pke/app/util/file"
"github.com/banzaicloud/pke/cmd/pke/app/util/flags"
"github.com/banzaicloud/pke/cmd/pke/app/util/linux"
pipelineutil "github.com/banzaicloud/pke/cmd/pke/app/util/pipeline"
"github.com/banzaicloud/pke/cmd/pke/app/util/runner"
"github.com/banzaicloud/pke/cmd/pke/app/util/validator"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
use = "kubernetes-node"
short = "Kubernetes worker node installation"
cmdKubeadm = "kubeadm"
kubeProxyConfig = "/var/lib/kube-proxy/config.conf"
kubeadmConfig = "/etc/kubernetes/kubeadm.conf"
kubeadmAmazonConfig = "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
config config.Config
kubernetesVersion string
containerRuntime string
advertiseAddress string
apiServerHostPort string
kubeadmToken string
caCertHash string
ResetOnFailure bool
podNetworkCIDR string
cloudProvider string
nodepool string
azureTenantID string
azureSubnetName string
azureSecurityGroupName string
azureVNetName string
azureVNetResourceGroup string
azureVMType string
azureLoadBalancerSku string
azureRouteTableName string
taints []string
labels []string
}
func NewCommand(config config.Config) *cobra.Command {
return phases.NewCommand(&Node{config: config})
}
func (n *Node) Use() string {
return use
}
func (n *Node) Short() string {
return short
}
func (n *Node) R | flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.String(constants.FlagPodNetworkCIDR, "", "range of IP addresses for the pod network on the current node")
// Pipeline
flags.StringP(constants.FlagPipelineAPIEndpoint, constants.FlagPipelineAPIEndpointShort, "", "Pipeline API server url")
flags.StringP(constants.FlagPipelineAPIToken, constants.FlagPipelineAPITokenShort, "", "Token for accessing Pipeline API")
flags.Bool(constants.FlagPipelineAPIInsecure, false, "If the Pipeline API should not verify the API's certificate")
flags.Int32(constants.FlagPipelineOrganizationID, 0, "Organization ID to use with Pipeline API")
flags.Int32(constants.FlagPipelineClusterID, 0, "Cluster ID to use with Pipeline API")
// Kubernetes cloud provider (optional)
flags.String(constants.FlagCloudProvider, "", "cloud provider. example: aws")
// Control Plane
flags.String(constants.FlagAdvertiseAddress, "", "Kubernetes API Server advertise address")
_ = flags.MarkHidden(constants.FlagAdvertiseAddress)
// Kubernetes cluster join parameters
flags.String(constants.FlagAPIServerHostPort, "", "Kubernetes API Server host port")
flags.String(constants.FlagKubeadmToken, "", "PKE join token")
flags.String(constants.FlagCACertHash, "", "CA cert hash")
flags.Bool(constants.FlagResetOnFailure, false, "Roll back changes after failures")
// Pipeline nodepool name (optional)
flags.String(constants.FlagPipelineNodepool, "", "name of the nodepool the node belongs to")
// Azure cloud
flags.String(constants.FlagAzureTenantID, "", "The AAD Tenant ID for the Subscription that the cluster is deployed in")
flags.String(constants.FlagAzureSubnetName, "", "The name of the subnet that the cluster is deployed in")
flags.String(constants.FlagAzureSecurityGroupName, "", "The name of the security group attached to the cluster's subnet")
flags.String(constants.FlagAzureVNetName, "", "The name of the VNet that the cluster is deployed in")
flags.String(constants.FlagAzureVNetResourceGroup, "", "The name of the resource group that the Vnet is deployed in")
flags.String(constants.FlagAzureVMType, "standard", "The type of azure nodes. Candidate values are: vmss and standard")
flags.String(constants.FlagAzureLoadBalancerSku, "basic", "Sku of Load Balancer and Public IP. Candidate values are: basic and standard")
flags.String(constants.FlagAzureRouteTableName, "kubernetes-routes", "The name of the route table attached to the subnet that the cluster is deployed in")
// Taints
flags.StringSlice(constants.FlagTaints, nil, "Specifies the taints the Node should be registered with")
// Labels
flags.StringSlice(constants.FlagLabels, nil, "Specifies the labels the Node should be registered with")
}
func (n *Node) Validate(cmd *cobra.Command) error {
if err := n.workerBootstrapParameters(cmd); err != nil {
return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: n.caCertHash,
}); err != nil {
return err
}
// Azure specific required flags
if n.cloudProvider == constants.CloudProviderAzure {
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagAzureTenantID: n.azureTenantID,
constants.FlagAzureSubnetName: n.azureSubnetName,
constants.FlagAzureSecurityGroupName: n.azureSecurityGroupName,
constants.FlagAzureVNetName: n.azureVNetName,
constants.FlagAzureVNetResourceGroup: n.azureVNetResourceGroup,
constants.FlagAzureVMType: n.azureVMType,
constants.FlagAzureLoadBalancerSku: n.azureLoadBalancerSku,
constants.FlagAzureRouteTableName: n.azureRouteTableName,
}); err != nil {
return err
}
}
switch n.containerRuntime {
case constants.ContainerRuntimeContainerd,
constants.ContainerRuntimeDocker:
// break
default:
return errors.Wrapf(constants.ErrUnsupportedContainerRuntime, "container runtime: %s", n.containerRuntime)
}
flags.PrintFlags(cmd.OutOrStdout(), n.Use(), cmd.Flags())
return nil
}
func (n *Node) Run(out io.Writer) error {
_, _ = fmt.Fprintf(out, "[%s] running\n", n.Use())
if err := n.install(out); err != nil {
if n.ResetOnFailure {
if rErr := kubeadm.Reset(out, n.containerRuntime); rErr != nil {
_, _ = fmt.Fprintf(out, "%v\n", rErr)
}
}
return err
}
return nil
}
func (n *Node) workerBootstrapParameters(cmd *cobra.Command) (err error) {
n.kubernetesVersion, err = cmd.Flags().GetString(constants.FlagKubernetesVersion)
if err != nil {
return
}
n.containerRuntime, err = cmd.Flags().GetString(constants.FlagContainerRuntime)
if err != nil {
return
}
// Override values with flags
n.advertiseAddress, err = cmd.Flags().GetString(constants.FlagAdvertiseAddress)
if err != nil {
return
}
n.apiServerHostPort, err = cmd.Flags().GetString(constants.FlagAPIServerHostPort)
if err != nil {
return
}
n.kubeadmToken, err = cmd.Flags().GetString(constants.FlagKubeadmToken)
if err != nil {
return
}
n.caCertHash, err = cmd.Flags().GetString(constants.FlagCACertHash)
if err != nil {
return
}
n.ResetOnFailure, err = cmd.Flags().GetBool(constants.FlagResetOnFailure)
if err != nil {
return
}
if n.kubeadmToken == "" && n.caCertHash == "" {
n.apiServerHostPort, n.kubeadmToken, n.caCertHash, err = pipelineutil.NodeJoinArgs(os.Stdout, cmd)
if err != nil {
return
}
}
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.azureTenantID, err = cmd.Flags().GetString(constants.FlagAzureTenantID)
if err != nil {
return
}
n.azureSubnetName, err = cmd.Flags().GetString(constants.FlagAzureSubnetName)
if err != nil {
return
}
n.azureSecurityGroupName, err = cmd.Flags().GetString(constants.FlagAzureSecurityGroupName)
if err != nil {
return
}
n.azureVNetName, err = cmd.Flags().GetString(constants.FlagAzureVNetName)
if err != nil {
return
}
n.azureVNetResourceGroup, err = cmd.Flags().GetString(constants.FlagAzureVNetResourceGroup)
if err != nil {
return
}
n.azureVMType, err = cmd.Flags().GetString(constants.FlagAzureVMType)
if err != nil {
return
}
n.azureLoadBalancerSku, err = cmd.Flags().GetString(constants.FlagAzureLoadBalancerSku)
if err != nil {
return
}
n.azureRouteTableName, err = cmd.Flags().GetString(constants.FlagAzureRouteTableName)
if err != nil {
return
}
n.taints, err = cmd.Flags().GetStringSlice(constants.FlagTaints)
if err != nil {
return
}
n.labels, err = cmd.Flags().GetStringSlice(constants.FlagLabels)
return
}
func (n *Node) install(out io.Writer) error {
// write kubeadm config
if err := n.writeKubeadmConfig(out, kubeadmConfig); err != nil {
return err
}
err := writeKubeProxyConfig(out, kubeProxyConfig)
if err != nil {
return err
}
// write kubeadm aws.conf
err = kubeadm.WriteKubeadmAmazonConfig(out, kubeadmAmazonConfig, n.cloudProvider)
if err != nil {
return err
}
// write kubeadm azure.conf
err = kubeadm.WriteKubeadmAzureConfig(out, kubeadmAzureConfig, n.cloudProvider, n.azureTenantID, n.azureSubnetName, n.azureSecurityGroupName, n.azureVNetName, n.azureVNetResourceGroup, n.azureVMType, n.azureLoadBalancerSku, n.azureRouteTableName, true)
if err != nil {
return err
}
// create cni directory
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, cniDir)
if err := os.MkdirAll(cniDir, 0755); err != nil {
return err
}
// CNI network bridge
if err := writeCNIBridge(out, n.cloudProvider, n.podNetworkCIDR, cniBridgeConfig); err != nil {
return err
}
// CNI network loopback
if err := writeCNILoopback(out, n.cloudProvider, cniLoopbackConfig); err != nil {
return err
}
for i := 0; i < maxJoinRetries; i++ {
var ll string
// kubeadm join 10.240.0.11:6443 --token 0uk28q.e5i6ewi7xb0g8ye9 --discovery-token-ca-cert-hash sha256:a1a74c00ecccf947b69b49172390018096affbbae25447c4bd0c0906273c1482 --cri-socket=unix:///run/containerd/containerd.sock
ll, err = runner.Cmd(out, cmdKubeadm, "join", "--config="+kubeadmConfig).CombinedOutputAsync()
if err == nil {
break
}
// re-run command on connection refused error
// couldn't validate the identity of the API Server: abort connecting to API servers after timeout of 5m0s
if !strings.Contains(ll, "connection refused") && !strings.Contains(ll, "timeout") {
return err
}
_, _ = fmt.Fprintf(out, "[%s] re-run %q command\n", use, cmdKubeadm)
time.Sleep(time.Second)
}
if err != nil {
return err
}
return linux.SystemctlEnableAndStart(out, "kubelet")
}
//go:generate templify -t ${GOTMPL} -p node -f kubeProxyConfig kube_proxy_config.yaml.tmpl
func writeKubeProxyConfig(out io.Writer, filename string) error {
dir := filepath.Dir(filename)
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, dir)
err := os.MkdirAll(dir, 0750)
if err != nil {
return err
}
return file.Overwrite(filename, kubeProxyConfigTemplate())
}
//go:generate templify -t ${GOTMPL} -p node -f cniBridge cni_bridge.json.tmpl
func writeCNIBridge(out io.Writer, cloudProvider, podNetworkCIDR, filename string) error {
if cloudProvider != constants.CloudProviderAzure || podNetworkCIDR == "" {
return nil
}
tmpl, err := template.New("cni-bridge").Parse(cniBridgeTemplate())
if err != nil {
return err
}
type data struct {
PodNetworkCIDR string
}
d := data{
PodNetworkCIDR: podNetworkCIDR,
}
return file.WriteTemplate(filename, tmpl, d)
}
//go:generate templify -t ${GOTMPL} -p node -f cniLoopback cni_loopback.json.tmpl
func writeCNILoopback(out io.Writer, cloudProvider, filename string) error {
if cloudProvider != constants.CloudProviderAzure {
return nil
}
return file.Overwrite(filename, cniLoopbackTemplate())
}
| egisterFlags( | identifier_name |
node.go | // Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package node
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"text/template"
"time"
"emperror.dev/errors"
"github.com/banzaicloud/pke/cmd/pke/app/config"
"github.com/banzaicloud/pke/cmd/pke/app/constants"
"github.com/banzaicloud/pke/cmd/pke/app/phases"
"github.com/banzaicloud/pke/cmd/pke/app/phases/kubeadm"
"github.com/banzaicloud/pke/cmd/pke/app/util/file"
"github.com/banzaicloud/pke/cmd/pke/app/util/flags"
"github.com/banzaicloud/pke/cmd/pke/app/util/linux"
pipelineutil "github.com/banzaicloud/pke/cmd/pke/app/util/pipeline"
"github.com/banzaicloud/pke/cmd/pke/app/util/runner"
"github.com/banzaicloud/pke/cmd/pke/app/util/validator"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
use = "kubernetes-node"
short = "Kubernetes worker node installation"
cmdKubeadm = "kubeadm"
kubeProxyConfig = "/var/lib/kube-proxy/config.conf"
kubeadmConfig = "/etc/kubernetes/kubeadm.conf"
kubeadmAmazonConfig = "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
config config.Config
kubernetesVersion string
containerRuntime string
advertiseAddress string
apiServerHostPort string
kubeadmToken string
caCertHash string
ResetOnFailure bool
podNetworkCIDR string
cloudProvider string
nodepool string
azureTenantID string
azureSubnetName string
azureSecurityGroupName string
azureVNetName string
azureVNetResourceGroup string
azureVMType string
azureLoadBalancerSku string
azureRouteTableName string
taints []string
labels []string
}
func NewCommand(config config.Config) *cobra.Command {
return phases.NewCommand(&Node{config: config})
}
func (n *Node) Use() string {
return use
}
func (n *Node) Short() string {
return short
}
func (n *Node) RegisterFlags(flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.String(constants.FlagPodNetworkCIDR, "", "range of IP addresses for the pod network on the current node")
// Pipeline
flags.StringP(constants.FlagPipelineAPIEndpoint, constants.FlagPipelineAPIEndpointShort, "", "Pipeline API server url")
flags.StringP(constants.FlagPipelineAPIToken, constants.FlagPipelineAPITokenShort, "", "Token for accessing Pipeline API")
flags.Bool(constants.FlagPipelineAPIInsecure, false, "If the Pipeline API should not verify the API's certificate")
flags.Int32(constants.FlagPipelineOrganizationID, 0, "Organization ID to use with Pipeline API")
flags.Int32(constants.FlagPipelineClusterID, 0, "Cluster ID to use with Pipeline API")
// Kubernetes cloud provider (optional)
flags.String(constants.FlagCloudProvider, "", "cloud provider. example: aws")
// Control Plane
flags.String(constants.FlagAdvertiseAddress, "", "Kubernetes API Server advertise address")
_ = flags.MarkHidden(constants.FlagAdvertiseAddress)
// Kubernetes cluster join parameters
flags.String(constants.FlagAPIServerHostPort, "", "Kubernetes API Server host port")
flags.String(constants.FlagKubeadmToken, "", "PKE join token")
flags.String(constants.FlagCACertHash, "", "CA cert hash")
flags.Bool(constants.FlagResetOnFailure, false, "Roll back changes after failures")
// Pipeline nodepool name (optional)
flags.String(constants.FlagPipelineNodepool, "", "name of the nodepool the node belongs to")
// Azure cloud
flags.String(constants.FlagAzureTenantID, "", "The AAD Tenant ID for the Subscription that the cluster is deployed in")
flags.String(constants.FlagAzureSubnetName, "", "The name of the subnet that the cluster is deployed in")
flags.String(constants.FlagAzureSecurityGroupName, "", "The name of the security group attached to the cluster's subnet")
flags.String(constants.FlagAzureVNetName, "", "The name of the VNet that the cluster is deployed in")
flags.String(constants.FlagAzureVNetResourceGroup, "", "The name of the resource group that the Vnet is deployed in")
flags.String(constants.FlagAzureVMType, "standard", "The type of azure nodes. Candidate values are: vmss and standard")
flags.String(constants.FlagAzureLoadBalancerSku, "basic", "Sku of Load Balancer and Public IP. Candidate values are: basic and standard")
flags.String(constants.FlagAzureRouteTableName, "kubernetes-routes", "The name of the route table attached to the subnet that the cluster is deployed in")
// Taints
flags.StringSlice(constants.FlagTaints, nil, "Specifies the taints the Node should be registered with")
// Labels
flags.StringSlice(constants.FlagLabels, nil, "Specifies the labels the Node should be registered with")
}
func (n *Node) Validate(cmd *cobra.Command) error {
if err := n.workerBootstrapParameters(cmd); err != nil { | return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: n.caCertHash,
}); err != nil {
return err
}
// Azure specific required flags
if n.cloudProvider == constants.CloudProviderAzure {
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagAzureTenantID: n.azureTenantID,
constants.FlagAzureSubnetName: n.azureSubnetName,
constants.FlagAzureSecurityGroupName: n.azureSecurityGroupName,
constants.FlagAzureVNetName: n.azureVNetName,
constants.FlagAzureVNetResourceGroup: n.azureVNetResourceGroup,
constants.FlagAzureVMType: n.azureVMType,
constants.FlagAzureLoadBalancerSku: n.azureLoadBalancerSku,
constants.FlagAzureRouteTableName: n.azureRouteTableName,
}); err != nil {
return err
}
}
switch n.containerRuntime {
case constants.ContainerRuntimeContainerd,
constants.ContainerRuntimeDocker:
// break
default:
return errors.Wrapf(constants.ErrUnsupportedContainerRuntime, "container runtime: %s", n.containerRuntime)
}
flags.PrintFlags(cmd.OutOrStdout(), n.Use(), cmd.Flags())
return nil
}
func (n *Node) Run(out io.Writer) error {
_, _ = fmt.Fprintf(out, "[%s] running\n", n.Use())
if err := n.install(out); err != nil {
if n.ResetOnFailure {
if rErr := kubeadm.Reset(out, n.containerRuntime); rErr != nil {
_, _ = fmt.Fprintf(out, "%v\n", rErr)
}
}
return err
}
return nil
}
func (n *Node) workerBootstrapParameters(cmd *cobra.Command) (err error) {
n.kubernetesVersion, err = cmd.Flags().GetString(constants.FlagKubernetesVersion)
if err != nil {
return
}
n.containerRuntime, err = cmd.Flags().GetString(constants.FlagContainerRuntime)
if err != nil {
return
}
// Override values with flags
n.advertiseAddress, err = cmd.Flags().GetString(constants.FlagAdvertiseAddress)
if err != nil {
return
}
n.apiServerHostPort, err = cmd.Flags().GetString(constants.FlagAPIServerHostPort)
if err != nil {
return
}
n.kubeadmToken, err = cmd.Flags().GetString(constants.FlagKubeadmToken)
if err != nil {
return
}
n.caCertHash, err = cmd.Flags().GetString(constants.FlagCACertHash)
if err != nil {
return
}
n.ResetOnFailure, err = cmd.Flags().GetBool(constants.FlagResetOnFailure)
if err != nil {
return
}
if n.kubeadmToken == "" && n.caCertHash == "" {
n.apiServerHostPort, n.kubeadmToken, n.caCertHash, err = pipelineutil.NodeJoinArgs(os.Stdout, cmd)
if err != nil {
return
}
}
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.azureTenantID, err = cmd.Flags().GetString(constants.FlagAzureTenantID)
if err != nil {
return
}
n.azureSubnetName, err = cmd.Flags().GetString(constants.FlagAzureSubnetName)
if err != nil {
return
}
n.azureSecurityGroupName, err = cmd.Flags().GetString(constants.FlagAzureSecurityGroupName)
if err != nil {
return
}
n.azureVNetName, err = cmd.Flags().GetString(constants.FlagAzureVNetName)
if err != nil {
return
}
n.azureVNetResourceGroup, err = cmd.Flags().GetString(constants.FlagAzureVNetResourceGroup)
if err != nil {
return
}
n.azureVMType, err = cmd.Flags().GetString(constants.FlagAzureVMType)
if err != nil {
return
}
n.azureLoadBalancerSku, err = cmd.Flags().GetString(constants.FlagAzureLoadBalancerSku)
if err != nil {
return
}
n.azureRouteTableName, err = cmd.Flags().GetString(constants.FlagAzureRouteTableName)
if err != nil {
return
}
n.taints, err = cmd.Flags().GetStringSlice(constants.FlagTaints)
if err != nil {
return
}
n.labels, err = cmd.Flags().GetStringSlice(constants.FlagLabels)
return
}
func (n *Node) install(out io.Writer) error {
// write kubeadm config
if err := n.writeKubeadmConfig(out, kubeadmConfig); err != nil {
return err
}
err := writeKubeProxyConfig(out, kubeProxyConfig)
if err != nil {
return err
}
// write kubeadm aws.conf
err = kubeadm.WriteKubeadmAmazonConfig(out, kubeadmAmazonConfig, n.cloudProvider)
if err != nil {
return err
}
// write kubeadm azure.conf
err = kubeadm.WriteKubeadmAzureConfig(out, kubeadmAzureConfig, n.cloudProvider, n.azureTenantID, n.azureSubnetName, n.azureSecurityGroupName, n.azureVNetName, n.azureVNetResourceGroup, n.azureVMType, n.azureLoadBalancerSku, n.azureRouteTableName, true)
if err != nil {
return err
}
// create cni directory
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, cniDir)
if err := os.MkdirAll(cniDir, 0755); err != nil {
return err
}
// CNI network bridge
if err := writeCNIBridge(out, n.cloudProvider, n.podNetworkCIDR, cniBridgeConfig); err != nil {
return err
}
// CNI network loopback
if err := writeCNILoopback(out, n.cloudProvider, cniLoopbackConfig); err != nil {
return err
}
for i := 0; i < maxJoinRetries; i++ {
var ll string
// kubeadm join 10.240.0.11:6443 --token 0uk28q.e5i6ewi7xb0g8ye9 --discovery-token-ca-cert-hash sha256:a1a74c00ecccf947b69b49172390018096affbbae25447c4bd0c0906273c1482 --cri-socket=unix:///run/containerd/containerd.sock
ll, err = runner.Cmd(out, cmdKubeadm, "join", "--config="+kubeadmConfig).CombinedOutputAsync()
if err == nil {
break
}
// re-run command on connection refused error
// couldn't validate the identity of the API Server: abort connecting to API servers after timeout of 5m0s
if !strings.Contains(ll, "connection refused") && !strings.Contains(ll, "timeout") {
return err
}
_, _ = fmt.Fprintf(out, "[%s] re-run %q command\n", use, cmdKubeadm)
time.Sleep(time.Second)
}
if err != nil {
return err
}
return linux.SystemctlEnableAndStart(out, "kubelet")
}
//go:generate templify -t ${GOTMPL} -p node -f kubeProxyConfig kube_proxy_config.yaml.tmpl
func writeKubeProxyConfig(out io.Writer, filename string) error {
dir := filepath.Dir(filename)
_, _ = fmt.Fprintf(out, "[%s] creating directory: %q\n", use, dir)
err := os.MkdirAll(dir, 0750)
if err != nil {
return err
}
return file.Overwrite(filename, kubeProxyConfigTemplate())
}
//go:generate templify -t ${GOTMPL} -p node -f cniBridge cni_bridge.json.tmpl
func writeCNIBridge(out io.Writer, cloudProvider, podNetworkCIDR, filename string) error {
if cloudProvider != constants.CloudProviderAzure || podNetworkCIDR == "" {
return nil
}
tmpl, err := template.New("cni-bridge").Parse(cniBridgeTemplate())
if err != nil {
return err
}
type data struct {
PodNetworkCIDR string
}
d := data{
PodNetworkCIDR: podNetworkCIDR,
}
return file.WriteTemplate(filename, tmpl, d)
}
//go:generate templify -t ${GOTMPL} -p node -f cniLoopback cni_loopback.json.tmpl
func writeCNILoopback(out io.Writer, cloudProvider, filename string) error {
if cloudProvider != constants.CloudProviderAzure {
return nil
}
return file.Overwrite(filename, cniLoopbackTemplate())
} | random_line_split |
|
perf_tool.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package debugd
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strconv"
"sync"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/debugd"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
type testCase struct {
quipperArgs []string // quipper arguments without the duration
disableCPUIdle bool
repetition int
}
func init() {
testing.AddTest(&testing.Test{
Func: PerfTool,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Tests D-Bus methods related to PerfTool",
Contacts: []string{
"[email protected]",
"[email protected]",
},
SoftwareDeps: []string{"chrome"},
Fixture: "chromeLoggedIn",
Params: []testing.Param{{
Name: "cycles",
Val: testCase{
quipperArgs: []string{"--", "record", "-a", "-e", "cycles", "-c", "1000003"},
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
}, {
Name: "etm",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil |
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
}
func testSingleCall(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Now().Sub(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui")
err := cmd.Run()
s.Log("stop ui returned: ", err)
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testRestoreCPUIdle(ctx context.Context, s *testing.State, d *debugd.Debugd) {
debugdPID := func() []byte {
cmd := exec.Command("pgrep", "debugd")
b, _ := cmd.Output()
return bytes.TrimSpace(b)
}
killDebugd := func() []byte {
b := debugdPID()
cmd := exec.Command("kill", "-9", string(b))
if err := cmd.Run(); err != nil {
s.Fatalf("Failed to kill debugd (%s), abort: %v", string(b), err)
}
return b
}
s.Run(ctx, "testRestoreCPUIdle", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if !tc.disableCPUIdle {
s.Log("Skipped, test case does not disable cpuidle states")
return
}
var old []byte
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled as intended: ", err)
}
old = killDebugd()
})
output, _, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
io.Copy(io.Discard, output)
output.Close()
err = testing.Poll(ctx, func(_ context.Context) error {
new := debugdPID()
if len(new) == 0 {
return errors.New("debugd process has not respawned yet")
}
if bytes.Compare(new, old) == 0 {
return errors.New("debugd process has not been killed")
}
return nil
}, &testing.PollOptions{Interval: time.Second})
if err != nil {
s.Error("Failed to wait for debugd to respawn: ", err)
}
if err := checkCPUIdleDisabled(false); err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
})
}
// checkCPUIdleDisabled verifies whether all CPU's idle states match the given
// disabled status.
func checkCPUIdleDisabled(disabled bool) error {
const cpuTopologyLocation = "/sys/devices/system/cpu/online"
const cpuidlePathPat = "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable"
b, err := ioutil.ReadFile(cpuTopologyLocation)
if err != nil {
return errors.Wrapf(err, "failed to read %s", cpuTopologyLocation)
}
var min, max int
if _, err := fmt.Sscanf(string(b), "%d-%d", &min, &max); err != nil {
return errors.Wrapf(err, "unexpected CPU topology file: %s", string(b))
}
for cpu := min; cpu <= max; cpu++ {
for state := 0; ; state++ {
cpuidlePath := fmt.Sprintf(cpuidlePathPat, cpu, state)
f, err := os.Open(cpuidlePath)
if err != nil {
if os.IsNotExist(err) {
break
}
return errors.Wrapf(err, "failed to open %s", cpuidlePath)
}
defer f.Close()
disable := make([]byte, 1)
if n, err := f.Read(disable); err != nil || n != 1 {
return errors.Wrapf(err, "failed to read %s", cpuidlePath)
}
if (disable[0] == '1') != disabled {
return errors.Errorf("file %s shows %s, which does not match the expected state %v",
cpuidlePath, string(disable), disabled)
}
}
}
return nil
}
| {
rPipe.Close()
return nil, 0, err
} | conditional_block |
perf_tool.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package debugd
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strconv"
"sync"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/debugd"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
type testCase struct {
quipperArgs []string // quipper arguments without the duration
disableCPUIdle bool
repetition int
}
func init() {
testing.AddTest(&testing.Test{
Func: PerfTool,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Tests D-Bus methods related to PerfTool",
Contacts: []string{
"[email protected]",
"[email protected]",
},
SoftwareDeps: []string{"chrome"},
Fixture: "chromeLoggedIn",
Params: []testing.Param{{
Name: "cycles",
Val: testCase{
quipperArgs: []string{"--", "record", "-a", "-e", "cycles", "-c", "1000003"},
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
}, {
Name: "etm",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil {
rPipe.Close()
return nil, 0, err
}
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) |
func testSingleCall(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Now().Sub(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui")
err := cmd.Run()
s.Log("stop ui returned: ", err)
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testRestoreCPUIdle(ctx context.Context, s *testing.State, d *debugd.Debugd) {
debugdPID := func() []byte {
cmd := exec.Command("pgrep", "debugd")
b, _ := cmd.Output()
return bytes.TrimSpace(b)
}
killDebugd := func() []byte {
b := debugdPID()
cmd := exec.Command("kill", "-9", string(b))
if err := cmd.Run(); err != nil {
s.Fatalf("Failed to kill debugd (%s), abort: %v", string(b), err)
}
return b
}
s.Run(ctx, "testRestoreCPUIdle", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if !tc.disableCPUIdle {
s.Log("Skipped, test case does not disable cpuidle states")
return
}
var old []byte
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled as intended: ", err)
}
old = killDebugd()
})
output, _, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
io.Copy(io.Discard, output)
output.Close()
err = testing.Poll(ctx, func(_ context.Context) error {
new := debugdPID()
if len(new) == 0 {
return errors.New("debugd process has not respawned yet")
}
if bytes.Compare(new, old) == 0 {
return errors.New("debugd process has not been killed")
}
return nil
}, &testing.PollOptions{Interval: time.Second})
if err != nil {
s.Error("Failed to wait for debugd to respawn: ", err)
}
if err := checkCPUIdleDisabled(false); err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
})
}
// checkCPUIdleDisabled verifies whether all CPU's idle states match the given
// disabled status.
func checkCPUIdleDisabled(disabled bool) error {
const cpuTopologyLocation = "/sys/devices/system/cpu/online"
const cpuidlePathPat = "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable"
b, err := ioutil.ReadFile(cpuTopologyLocation)
if err != nil {
return errors.Wrapf(err, "failed to read %s", cpuTopologyLocation)
}
var min, max int
if _, err := fmt.Sscanf(string(b), "%d-%d", &min, &max); err != nil {
return errors.Wrapf(err, "unexpected CPU topology file: %s", string(b))
}
for cpu := min; cpu <= max; cpu++ {
for state := 0; ; state++ {
cpuidlePath := fmt.Sprintf(cpuidlePathPat, cpu, state)
f, err := os.Open(cpuidlePath)
if err != nil {
if os.IsNotExist(err) {
break
}
return errors.Wrapf(err, "failed to open %s", cpuidlePath)
}
defer f.Close()
disable := make([]byte, 1)
if n, err := f.Read(disable); err != nil || n != 1 {
return errors.Wrapf(err, "failed to read %s", cpuidlePath)
}
if (disable[0] == '1') != disabled {
return errors.Errorf("file %s shows %s, which does not match the expected state %v",
cpuidlePath, string(disable), disabled)
}
}
}
return nil
}
| {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
} | identifier_body |
perf_tool.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package debugd
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strconv"
"sync"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/debugd"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
type testCase struct {
quipperArgs []string // quipper arguments without the duration
disableCPUIdle bool
repetition int
}
func init() {
testing.AddTest(&testing.Test{
Func: PerfTool,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Tests D-Bus methods related to PerfTool",
Contacts: []string{
"[email protected]",
"[email protected]",
},
SoftwareDeps: []string{"chrome"},
Fixture: "chromeLoggedIn",
Params: []testing.Param{{
Name: "cycles",
Val: testCase{
quipperArgs: []string{"--", "record", "-a", "-e", "cycles", "-c", "1000003"},
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
}, {
Name: "etm",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil {
rPipe.Close()
return nil, 0, err
}
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
}
func | (ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Now().Sub(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui")
err := cmd.Run()
s.Log("stop ui returned: ", err)
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testRestoreCPUIdle(ctx context.Context, s *testing.State, d *debugd.Debugd) {
debugdPID := func() []byte {
cmd := exec.Command("pgrep", "debugd")
b, _ := cmd.Output()
return bytes.TrimSpace(b)
}
killDebugd := func() []byte {
b := debugdPID()
cmd := exec.Command("kill", "-9", string(b))
if err := cmd.Run(); err != nil {
s.Fatalf("Failed to kill debugd (%s), abort: %v", string(b), err)
}
return b
}
s.Run(ctx, "testRestoreCPUIdle", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if !tc.disableCPUIdle {
s.Log("Skipped, test case does not disable cpuidle states")
return
}
var old []byte
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled as intended: ", err)
}
old = killDebugd()
})
output, _, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
io.Copy(io.Discard, output)
output.Close()
err = testing.Poll(ctx, func(_ context.Context) error {
new := debugdPID()
if len(new) == 0 {
return errors.New("debugd process has not respawned yet")
}
if bytes.Compare(new, old) == 0 {
return errors.New("debugd process has not been killed")
}
return nil
}, &testing.PollOptions{Interval: time.Second})
if err != nil {
s.Error("Failed to wait for debugd to respawn: ", err)
}
if err := checkCPUIdleDisabled(false); err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
})
}
// checkCPUIdleDisabled verifies whether all CPU's idle states match the given
// disabled status.
func checkCPUIdleDisabled(disabled bool) error {
const cpuTopologyLocation = "/sys/devices/system/cpu/online"
const cpuidlePathPat = "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable"
b, err := ioutil.ReadFile(cpuTopologyLocation)
if err != nil {
return errors.Wrapf(err, "failed to read %s", cpuTopologyLocation)
}
var min, max int
if _, err := fmt.Sscanf(string(b), "%d-%d", &min, &max); err != nil {
return errors.Wrapf(err, "unexpected CPU topology file: %s", string(b))
}
for cpu := min; cpu <= max; cpu++ {
for state := 0; ; state++ {
cpuidlePath := fmt.Sprintf(cpuidlePathPat, cpu, state)
f, err := os.Open(cpuidlePath)
if err != nil {
if os.IsNotExist(err) {
break
}
return errors.Wrapf(err, "failed to open %s", cpuidlePath)
}
defer f.Close()
disable := make([]byte, 1)
if n, err := f.Read(disable); err != nil || n != 1 {
return errors.Wrapf(err, "failed to read %s", cpuidlePath)
}
if (disable[0] == '1') != disabled {
return errors.Errorf("file %s shows %s, which does not match the expected state %v",
cpuidlePath, string(disable), disabled)
}
}
}
return nil
}
| testSingleCall | identifier_name |
perf_tool.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package debugd
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strconv"
"sync"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/debugd"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
type testCase struct {
quipperArgs []string // quipper arguments without the duration
disableCPUIdle bool
repetition int
}
func init() {
testing.AddTest(&testing.Test{
Func: PerfTool,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Tests D-Bus methods related to PerfTool",
Contacts: []string{
"[email protected]",
"[email protected]",
},
SoftwareDeps: []string{"chrome"},
Fixture: "chromeLoggedIn",
Params: []testing.Param{{
Name: "cycles",
Val: testCase{
quipperArgs: []string{"--", "record", "-a", "-e", "cycles", "-c", "1000003"},
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
}, {
Name: "etm",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 1,
},
ExtraAttr: []string{"group:mainline", "informational"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil {
rPipe.Close()
return nil, 0, err
}
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
}
func testSingleCall(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes()) | func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Now().Sub(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui")
err := cmd.Run()
s.Log("stop ui returned: ", err)
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testRestoreCPUIdle(ctx context.Context, s *testing.State, d *debugd.Debugd) {
debugdPID := func() []byte {
cmd := exec.Command("pgrep", "debugd")
b, _ := cmd.Output()
return bytes.TrimSpace(b)
}
killDebugd := func() []byte {
b := debugdPID()
cmd := exec.Command("kill", "-9", string(b))
if err := cmd.Run(); err != nil {
s.Fatalf("Failed to kill debugd (%s), abort: %v", string(b), err)
}
return b
}
s.Run(ctx, "testRestoreCPUIdle", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if !tc.disableCPUIdle {
s.Log("Skipped, test case does not disable cpuidle states")
return
}
var old []byte
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled as intended: ", err)
}
old = killDebugd()
})
output, _, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
io.Copy(io.Discard, output)
output.Close()
err = testing.Poll(ctx, func(_ context.Context) error {
new := debugdPID()
if len(new) == 0 {
return errors.New("debugd process has not respawned yet")
}
if bytes.Compare(new, old) == 0 {
return errors.New("debugd process has not been killed")
}
return nil
}, &testing.PollOptions{Interval: time.Second})
if err != nil {
s.Error("Failed to wait for debugd to respawn: ", err)
}
if err := checkCPUIdleDisabled(false); err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
})
}
// checkCPUIdleDisabled verifies whether all CPU's idle states match the given
// disabled status.
func checkCPUIdleDisabled(disabled bool) error {
const cpuTopologyLocation = "/sys/devices/system/cpu/online"
const cpuidlePathPat = "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable"
b, err := ioutil.ReadFile(cpuTopologyLocation)
if err != nil {
return errors.Wrapf(err, "failed to read %s", cpuTopologyLocation)
}
var min, max int
if _, err := fmt.Sscanf(string(b), "%d-%d", &min, &max); err != nil {
return errors.Wrapf(err, "unexpected CPU topology file: %s", string(b))
}
for cpu := min; cpu <= max; cpu++ {
for state := 0; ; state++ {
cpuidlePath := fmt.Sprintf(cpuidlePathPat, cpu, state)
f, err := os.Open(cpuidlePath)
if err != nil {
if os.IsNotExist(err) {
break
}
return errors.Wrapf(err, "failed to open %s", cpuidlePath)
}
defer f.Close()
disable := make([]byte, 1)
if n, err := f.Read(disable); err != nil || n != 1 {
return errors.Wrapf(err, "failed to read %s", cpuidlePath)
}
if (disable[0] == '1') != disabled {
return errors.Errorf("file %s shows %s, which does not match the expected state %v",
cpuidlePath, string(disable), disabled)
}
}
}
return nil
} | })
}
| random_line_split |
dockerapi.go | // Package dockerapi is the facade to the Docker remote api.
package dockerapi
import (
	"context"
	"fmt"
	"io"
	"log"
	"os"
	"sync"

	"tlex/mapsi2disk"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/go-connections/nat"
	"golang.org/x/sync/errgroup"
)

const containerRunningStateString string = "running"

// OwnedContainers maps the ID of each container created by this process to its published host port.
type OwnedContainers map[string]int

// ContainerReaderStream pairs a container's reader stream with the host port mapped to the container's HTTP server.
type ContainerReaderStream struct {
	ReaderStream io.ReadCloser
	HostPort     int
}
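
// These two types carry the package's bookkeeping: containers are created and recorded in an
// OwnedContainers map, and the log/stats helpers below hand back one ContainerReaderStream per
// owned, running container so callers can pump its output keyed by host port.
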
// RemoveLiveContainersFromPreviousRun cleans up previously owned live containers
// that might have been left hanging by an earlier run of this process.
func RemoveLiveContainersFromPreviousRun() {
	readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
	if err == nil {
		readBackOwnedContainers := readObj.(map[string]int)
		defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
		dockerClient := GetDockerClient()
		for containerID := range readBackOwnedContainers {
			log.Printf("Deleting container: %v from previous launch.\n", containerID)
			if err = dockerClient.ContainerStop(context.Background(), containerID, nil); err != nil {
				log.Printf("Unable to stop container %v: %v\n", containerID, err)
			}
		}
	}
}
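
// Note: the cleanup above depends on a gob file (mapsi2disk.GobFilename) persisted by a
// previous run. On a clean first launch the file does not exist, ReadContainerPortsFromDisk
// returns an error, and the function simply does nothing.
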
// GetDockerClient returns a Docker remote API client handle that is foundational to all
// Docker remote API interactions. Upon error it panics.
// This process creates a Docker client when launching and holds on to it for all API
// interactions. This should be contrasted with the stateless approach of requesting a new
// client for every API interaction.
func GetDockerClient() *client.Client {
	ctx := context.Background()
	dockerClient, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Panicf("Docker client.NewClientWithOpts error: %s\n", err)
	}
	dockerClient.NegotiateAPIVersion(ctx)
	return dockerClient
}
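
// Typical usage of the client returned above (an illustrative sketch, not additional API):
//
//	dockerClient := GetDockerClient()
//	BuildDockerImage(dockerClient, "./")
//	logStreams := ownedContainers.GetContainersLogReaders(dockerClient)
//
// where ownedContainers is an OwnedContainers map populated when the containers were created.
// Reusing the single negotiated client for every call is the design choice documented above.
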
// BuildDockerImage builds a Docker image for a given dockerFilePath located in the same
// folder as the running process.
// Upon error it exits the process.
func BuildDockerImage(dockerClient *client.Client, dockerFilePath string) {
	tarDockerfileReader, err := archive.TarWithOptions(dockerFilePath, &archive.TarOptions{})
	if err != nil {
		log.Fatal(err, " :unable to create tar with Dockerfile")
	}
	log.Printf("Building Docker Image in %q\n", dockerFilePath)
	options := types.ImageBuildOptions{
		SuppressOutput: false,
		Remove:         true,
		ForceRemove:    true,
		PullParent:     true,
		Tags:           []string{"mariohellowebserver"},
		Dockerfile:     "Dockerfile",
	}
	buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
	if err != nil {
		log.Fatal(err, " :unable to read image build response")
	}
	defer buildResponse.Body.Close()
	// Stream the daemon's build output to stderr as it arrives.
	termFd, isTerm := term.GetFdInfo(os.Stderr)
	jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
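
// A note on the build context (standard Docker API behavior, summarized here): the tarred
// directory passed to ImageBuild is the build context, so the Dockerfile named in
// ImageBuildOptions must sit at the root of dockerFilePath and any files it COPYs must live
// inside that directory. For example:
//
//	BuildDockerImage(GetDockerClient(), "./")
//
// builds the image tagged "mariohellowebserver" from the Dockerfile in the current folder.
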
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
	containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		log.Panicf("Unable to list containers. Error: %v", err)
	}
	containerLogStreams := []ContainerReaderStream{}
	for _, container := range containers {
		hostPort := owned[container.ID]
		if hostPort > 0 && container.State == containerRunningStateString {
			readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
				ShowStdout: true,
				ShowStderr: true,
				Follow:     true,
			})
			if err != nil {
				log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
			}
			containerLogStream := ContainerReaderStream{readerStream, hostPort}
			containerLogStreams = append(containerLogStreams, containerLogStream)
		}
	}
	return containerLogStreams
}
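
// Consuming the log streams (illustrative sketch): each ReaderStream is typically drained on
// its own goroutine and keyed by HostPort when written out. For containers created without a
// TTY, Docker multiplexes stdout and stderr into one stream; stdcopy.StdCopy from
// github.com/docker/docker/pkg/stdcopy can demultiplex it if separate streams are needed.
//
//	for _, s := range owned.GetContainersLogReaders(dockerClient) {
//		go func(s ContainerReaderStream) {
//			defer s.ReaderStream.Close()
//			_, _ = io.Copy(logWriterFor(s.HostPort), s.ReaderStream) // logWriterFor is hypothetical
//		}(s)
//	}
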
// getContainers lists all the containers running on the host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
	containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		log.Printf("Unable to list containers: %v", err)
		return nil, err
	}
	return containers, nil
}

// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails partway and the already-launched instances have to be cleaned up.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
	containers, err := getContainers(dockerClient)
	if err == nil {
		for _, container := range containers {
			if _, ok := owned[container.ID]; ok {
				if err = dockerClient.ContainerStop(context.Background(), container.ID, nil); err != nil {
					log.Printf("Unable to stop container %s: %v\n", container.ID, err)
				}
			}
		}
	}
}

// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
	containers, err := getContainers(cli)
	if err != nil {
		log.Panicf("Cannot access live containers...\n")
	}
	containersCount := len(containers)
	if requestedLiveContainers > containersCount {
		log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
	}
	// Assert owned containers are running
	for _, container := range containers {
		if owned[container.ID] > 0 && container.State == containerRunningStateString {
			log.Printf("Container %s in %s state.\n", container.ID, container.State)
		} else {
			log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
		}
	}
	return nil
}

// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended number of containers is live; otherwise it panics.
// This is called by tests. AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone checks that no containers exist and that the system cleaned up; otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert owned containers are not running
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
dockerClient = GetDockerClient()
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
defer terminatorGroup.Done()
// Use a goroutine-local err so concurrent stops do not race on a shared variable.
if err := dockerClient.ContainerStop(context.Background(), contID, nil); err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
}()
}
}
}
}
// createContainer creates a new container for the dockerImageName
// at the container httpServerContainerPort value.
// and at the host httpServerHostPort value.
// Returns the new container's struct abstraction, error.
// Upon error it panics.
// Credit: https://medium.com/tarkalabs/controlling-the-docker-engine-in-go-826012f9671c
func createContainer(dockerClient *client.Client, dockerImageName string, httpServerContainerPort int, httpServerHostPort int) (container.ContainerCreateCreatedBody, error) {
hostBinding := nat.PortBinding{
HostIP: "0.0.0.0",
HostPort: fmt.Sprintf("%d", httpServerHostPort),
}
containerPort, err := nat.NewPort("tcp", fmt.Sprintf("%d", httpServerContainerPort))
if err != nil {
log.Panicf("Unable to create a tcp httpServerContainerPort %d\n", httpServerContainerPort)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
containerBody, err := dockerClient.ContainerCreate(context.Background(),
&container.Config{Image: dockerImageName},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: true,
},
nil,
fmt.Sprintf("HttpServerAt_%d", httpServerHostPort))
if err != nil {
log.Panicf("ContainerCreate failed for the image: %s, host port: %d with error: %s\n", dockerImageName, httpServerContainerPort, err)
}
return containerBody, err
}
// setContainerLive starts a created container in active live state.
func setContainerLive(dockerClient *client.Client, containerID string) (string, error) |
// setNewContainerLive
// 1. creates a new container for the given imageName and
// 2. starts it into an active live state:
// at the container httpServerContainerPort value,
// and at the host httpServerHostPort value.
// Returns the new container ID, error.
func setNewContainerLive(dockerClient *client.Client, imageName string, httpServerContainerPort int, httpServerHostPort int) (string, error) {
cont, err := createContainer(dockerClient, imageName, httpServerContainerPort, httpServerHostPort)
containerID, err := setContainerLive(dockerClient, cont.ID)
if err != nil {
log.Printf("ContainerStart failed for the image: %s, host port: %d with error: %s\n", imageName, httpServerContainerPort, err)
return "", err
}
log.Printf("Container %s with host port %d is live.\n", containerID, httpServerHostPort)
return containerID, err
}
// CreateContainers requests live containers. It creates and starts them into an active live state for the given dockerImageName,
// listening inside each container on containerListeningPort
// and on the host at consecutive ports counting up from startingListeningHostPort.
func (owned OwnedContainers) CreateContainers(launcherGroup *errgroup.Group, requestedLiveContainers int, dockerClient *client.Client, dockerImageName string, startingListeningHostPort int, containerListeningPort int) {
// Manage concurrent access to shared owned map
ownedMutex := &sync.Mutex{}
for i := 0; i < requestedLiveContainers; i++ {
// necessary to capture each loop iteration of i
portCounter := i
// Concurrent launching of docker instances
launcherGroup.Go(func() error {
hostPort := startingListeningHostPort + portCounter
containerID, err := setNewContainerLive(dockerClient, dockerImageName, containerListeningPort, hostPort)
if err != nil {
log.Printf("ContainerCreate failed for the image: %s, host port: %d with error:%s\n", dockerImageName, hostPort, err)
} else {
ownedMutex.Lock()
owned[containerID] = hostPort
ownedMutex.Unlock()
}
return err
})
}
}
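// Example sketch: one possible end-to-end call sequence for the helpers in this file.
// The container count and port numbers are illustrative assumptions; the image tag
// matches the one used in BuildDockerImage above.
func launchAndStopExample() {
	dockerClient := GetDockerClient()
	owned := OwnedContainers{}
	launcherGroup := new(errgroup.Group)
	owned.CreateContainers(launcherGroup, 3, dockerClient, "mariohellowebserver", 8080, 8080)
	if err := launcherGroup.Wait(); err != nil {
		// Launching failed part-way; stop whatever was already started.
		owned.CleanLeftOverContainers(dockerClient)
		return
	}
	terminatorGroup := &sync.WaitGroup{}
	owned.StopAllLiveContainers(terminatorGroup, dockerClient)
	terminatorGroup.Wait()
}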
// PersistOpenContainerIDs saves the presumably populated owned containers map (ID -> host port) to the filesystem.
func (owned OwnedContainers) PersistOpenContainerIDs() {
mapToSave := map[string]int(owned)
err := mapsi2disk.SaveContainerPorts2Disk(mapsi2disk.GobFilename, &mapToSave)
if err != nil {
log.Printf("SaveContainerPorts2Disk() error = %v\n", err)
}
} | {
err := dockerClient.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
return containerID, err
} | identifier_body |
dockerapi.go | // Package dockerapi is the facade to the Docker remote api.
package dockerapi
import (
"sync"
"context"
"fmt"
"io"
"log"
"os"
"tlex/mapsi2disk"
"golang.org/x/sync/errgroup"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/term"
"github.com/docker/go-connections/nat"
)
const containerRunningStateString string = "running"
// OwnedContainers contains the container IDs created by this process, mapped to their host ports.
type OwnedContainers map[string]int
// ContainerReaderStream contains a container reader stream, host port mapped to the container http server.
type ContainerReaderStream struct {
ReaderStream io.ReadCloser
HostPort int
}
// Clean up previously owned live instances that might have been left hanging.
func | () {
readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
if err == nil {
// Only assert the concrete type after the error check; readObj may be nil on failure.
readBackOwnedContainers := readObj.(map[string]int)
defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
dockerClient := GetDockerClient()
for containerID := range readBackOwnedContainers {
log.Printf("Deleting container: %v from previous launch.\n", containerID)
err = dockerClient.ContainerStop(context.Background(), containerID, nil)
}
}
}
// GetDockerClient returns a docker remote api client handle value foundational to all Docker remote api interactions.
// Upon error it panics.
// This process creates a docker client when launching and holds on to it for all API interactions.
// This should be contrasted with the stateless approach of requesting a new client for any API interaction.
func GetDockerClient() *client.Client {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Panicf("Docker client.NewClientWithOpts error: %s\n", err)
}
dockerClient.NegotiateAPIVersion(ctx)
return dockerClient
}
// BuildDockerImage builds a Docker Image for a given dockerFilePath located in the same folder
// of the running process.
// Upon error, it exits the process.
func BuildDockerImage(dockerClient *client.Client, dockerFilePath string) {
tarDockerfileReader, err := archive.TarWithOptions(dockerFilePath, &archive.TarOptions{})
if err != nil {
log.Fatal(err, " :unable to create tar with Dockerfile")
}
log.Printf("Building Docker Image in %q\n", dockerFilePath)
options := types.ImageBuildOptions{
SuppressOutput: false,
Remove: true,
ForceRemove: true,
PullParent: true,
Tags: []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerLogStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
}
containerLogStream := ContainerReaderStream{readerStream, hostPort}
containerLogStreams = append(containerLogStreams, containerLogStream)
}
}
return containerLogStreams
}
// getContainers lists all the containers running on host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Printf("Unable to list containers: %v", err)
return nil, err
}
return containers, nil
}
// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails and already-launched instances have to be cleaned up.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if _, ok := owned[container.ID]; ok {
err = dockerClient.ContainerStop(context.Background(), container.ID, nil)
}
}
}
}
// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if owned[container.ID] > 0 && container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
return nil
}
// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended number of containers is live; otherwise it panics.
// This is called by tests. AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone checks that no containers exist and that the system cleaned up; otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert owned containers are not running
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
dockerClient = GetDockerClient()
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
defer terminatorGroup.Done()
// Use a goroutine-local err so concurrent stops do not race on a shared variable.
if err := dockerClient.ContainerStop(context.Background(), contID, nil); err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
}()
}
}
}
}
// createContainer creates a new container for the dockerImageName
// at the container httpServerContainerPort value.
// and at the host httpServerHostPort value.
// Returns the new container's struct abstraction, error.
// Upon error it panics.
// Credit: https://medium.com/tarkalabs/controlling-the-docker-engine-in-go-826012f9671c
func createContainer(dockerClient *client.Client, dockerImageName string, httpServerContainerPort int, httpServerHostPort int) (container.ContainerCreateCreatedBody, error) {
hostBinding := nat.PortBinding{
HostIP: "0.0.0.0",
HostPort: fmt.Sprintf("%d", httpServerHostPort),
}
containerPort, err := nat.NewPort("tcp", fmt.Sprintf("%d", httpServerContainerPort))
if err != nil {
log.Panicf("Unable to create a tcp httpServerContainerPort %d\n", httpServerContainerPort)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
containerBody, err := dockerClient.ContainerCreate(context.Background(),
&container.Config{Image: dockerImageName},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: true,
},
nil,
fmt.Sprintf("HttpServerAt_%d", httpServerHostPort))
if err != nil {
log.Panicf("ContainerCreate failed for the image: %s, host port: %d with error: %s\n", dockerImageName, httpServerContainerPort, err)
}
return containerBody, err
}
// setContainerLive starts a created container in active live state.
func setContainerLive(dockerClient *client.Client, containerID string) (string, error) {
err := dockerClient.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
return containerID, err
}
// setNewContainerLive
// 1. creates a new container for the given imageName and
// 2. starts it into an active live state:
// at the container httpServerContainerPort value,
// and at the host httpServerHostPort value.
// Returns the new container ID, error.
func setNewContainerLive(dockerClient *client.Client, imageName string, httpServerContainerPort int, httpServerHostPort int) (string, error) {
cont, err := createContainer(dockerClient, imageName, httpServerContainerPort, httpServerHostPort)
containerID, err := setContainerLive(dockerClient, cont.ID)
if err != nil {
log.Printf("ContainerStart failed for the image: %s, host port: %d with error: %s\n", imageName, httpServerContainerPort, err)
return "", err
}
log.Printf("Container %s with host port %d is live.\n", containerID, httpServerHostPort)
return containerID, err
}
// CreateContainers requests live containers. It creates and starts them into an active live state for the given dockerImageName,
// listening inside each container on containerListeningPort
// and on the host at consecutive ports counting up from startingListeningHostPort.
func (owned OwnedContainers) CreateContainers(launcherGroup *errgroup.Group, requestedLiveContainers int, dockerClient *client.Client, dockerImageName string, startingListeningHostPort int, containerListeningPort int) {
// Manage concurrent access to shared owned map
ownedMutex := &sync.Mutex{}
for i := 0; i < requestedLiveContainers; i++ {
// necessary to capture each loop iteration of i
portCounter := i
// Concurrent launching of docker instances
launcherGroup.Go(func() error {
hostPort := startingListeningHostPort + portCounter
containerID, err := setNewContainerLive(dockerClient, dockerImageName, containerListeningPort, hostPort)
if err != nil {
log.Printf("ContainerCreate failed for the image: %s, host port: %d with error:%s\n", dockerImageName, hostPort, err)
} else {
ownedMutex.Lock()
owned[containerID] = hostPort
ownedMutex.Unlock()
}
return err
})
}
}
// PersistOpenContainerIDs saves the presumably populated owned containers map (ID -> host port) to the filesystem.
func (owned OwnedContainers) PersistOpenContainerIDs() {
mapToSave := map[string]int(owned)
err := mapsi2disk.SaveContainerPorts2Disk(mapsi2disk.GobFilename, &mapToSave)
if err != nil {
log.Printf("SaveContainerPorts2Disk() error = %v\n", err)
}
} | RemoveLiveContainersFromPreviousRun | identifier_name |
dockerapi.go | // Package dockerapi is the facade to the Docker remote api.
package dockerapi
import (
"sync"
"context"
"fmt"
"io"
"log"
"os"
"tlex/mapsi2disk"
"golang.org/x/sync/errgroup"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/term"
"github.com/docker/go-connections/nat"
)
const containerRunningStateString string = "running"
// OwnedContainers contains the container IDs created by this process, mapped to their host ports.
type OwnedContainers map[string]int
// ContainerReaderStream contains a container reader stream, host port mapped to the container http server.
type ContainerReaderStream struct {
ReaderStream io.ReadCloser
HostPort int
}
// Clean up previously owned live instances that might have been left hanging.
func RemoveLiveContainersFromPreviousRun() {
|
if err == nil {
defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
dockerClient := GetDockerClient()
for containerID := range readBackOwnedContainers {
log.Printf("Deleting container: %v from previous launch.\n", containerID)
err = dockerClient.ContainerStop(context.Background(), containerID, nil)
}
}
}
// GetDockerClient returns a docker remote api client handle value foundational to all Docker remote api interactions.
// Upon error it panics.
// This process creates a docker client when launching and holds on to it for all API interactions.
// This should be contrasted with the stateless approach of requesting a new client for any API interaction.
func GetDockerClient() *client.Client {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Panicf("Docker client.NewClientWithOpts error: %s\n", err)
}
dockerClient.NegotiateAPIVersion(ctx)
return dockerClient
}
// BuildDockerImage builds a Docker Image for a given dockerFilePath located in the same folder
// of the running process.
// Upon error, it exits the process.
func BuildDockerImage(dockerClient *client.Client, dockerFilePath string) {
tarDockerfileReader, err := archive.TarWithOptions(dockerFilePath, &archive.TarOptions{})
if err != nil {
log.Fatal(err, " :unable to create tar with Dockerfile")
}
log.Printf("Building Docker Image in %q\n", dockerFilePath)
options := types.ImageBuildOptions{
SuppressOutput: false,
Remove: true,
ForceRemove: true,
PullParent: true,
Tags: []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerLogStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
}
containerLogStream := ContainerReaderStream{readerStream, hostPort}
containerLogStreams = append(containerLogStreams, containerLogStream)
}
}
return containerLogStreams
}
// getContainers lists all the containers running on host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Printf("Unable to list containers: %v", err)
return nil, err
}
return containers, nil
}
// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails and already-launched instances have to be cleaned up.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if _, ok := owned[container.ID]; ok {
err = dockerClient.ContainerStop(context.Background(), container.ID, nil)
}
}
}
}
// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if owned[container.ID] > 0 && container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
return nil
}
// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended number of containers is live; otherwise it panics.
// This is called by tests. AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone checks that no containers exist and that the system cleaned up; otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert owned containers are not running
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
dockerClient = GetDockerClient()
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
defer terminatorGroup.Done()
// Use a goroutine-local err so concurrent stops do not race on a shared variable.
if err := dockerClient.ContainerStop(context.Background(), contID, nil); err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
}()
}
}
}
}
// createContainer creates a new container for the dockerImageName
// at the container httpServerContainerPort value.
// and at the host httpServerHostPort value.
// Returns the new container's struct abstraction, error.
// Upon error it panics.
// Credit: https://medium.com/tarkalabs/controlling-the-docker-engine-in-go-826012f9671c
func createContainer(dockerClient *client.Client, dockerImageName string, httpServerContainerPort int, httpServerHostPort int) (container.ContainerCreateCreatedBody, error) {
hostBinding := nat.PortBinding{
HostIP: "0.0.0.0",
HostPort: fmt.Sprintf("%d", httpServerHostPort),
}
containerPort, err := nat.NewPort("tcp", fmt.Sprintf("%d", httpServerContainerPort))
if err != nil {
log.Panicf("Unable to create a tcp httpServerContainerPort %d\n", httpServerContainerPort)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
containerBody, err := dockerClient.ContainerCreate(context.Background(),
&container.Config{Image: dockerImageName},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: true,
},
nil,
fmt.Sprintf("HttpServerAt_%d", httpServerHostPort))
if err != nil {
log.Panicf("ContainerCreate failed for the image: %s, host port: %d with error: %s\n", dockerImageName, httpServerContainerPort, err)
}
return containerBody, err
}
// setContainerLive starts a created container in active live state.
func setContainerLive(dockerClient *client.Client, containerID string) (string, error) {
err := dockerClient.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
return containerID, err
}
// setNewContainerLive
// 1. creates a new container for the given imageName and
// 2. starts it into an active live state:
// at the container httpServerContainerPort value,
// and at the host httpServerHostPort value.
// Returns the new container ID, error.
func setNewContainerLive(dockerClient *client.Client, imageName string, httpServerContainerPort int, httpServerHostPort int) (string, error) {
cont, err := createContainer(dockerClient, imageName, httpServerContainerPort, httpServerHostPort)
containerID, err := setContainerLive(dockerClient, cont.ID)
if err != nil {
log.Printf("ContainerStart failed for the image: %s, host port: %d with error: %s\n", imageName, httpServerContainerPort, err)
return "", err
}
log.Printf("Container %s with host port %d is live.\n", containerID, httpServerHostPort)
return containerID, err
}
// CreateContainers requests live containers. It creates and starts them into an active live state for the given dockerImageName,
// listening inside each container on containerListeningPort
// and on the host at consecutive ports counting up from startingListeningHostPort.
func (owned OwnedContainers) CreateContainers(launcherGroup *errgroup.Group, requestedLiveContainers int, dockerClient *client.Client, dockerImageName string, startingListeningHostPort int, containerListeningPort int) {
// Manage concurrent access to shared owned map
ownedMutex := &sync.Mutex{}
for i := 0; i < requestedLiveContainers; i++ {
// necessary to capture each loop iteration of i
portCounter := i
// Concurrent launching of docker instances
launcherGroup.Go(func() error {
hostPort := startingListeningHostPort + portCounter
containerID, err := setNewContainerLive(dockerClient, dockerImageName, containerListeningPort, hostPort)
if err != nil {
log.Printf("ContainerCreate failed for the image: %s, host port: %d with error:%s\n", dockerImageName, hostPort, err)
} else {
ownedMutex.Lock()
owned[containerID] = hostPort
ownedMutex.Unlock()
}
return err
})
}
}
// PersistOpenContainerIDs saves the presumably populated owned containers map (ID -> host port) to the filesystem.
func (owned OwnedContainers) PersistOpenContainerIDs() {
mapToSave := map[string]int(owned)
err := mapsi2disk.SaveContainerPorts2Disk(mapsi2disk.GobFilename, &mapToSave)
if err != nil {
log.Printf("SaveContainerPorts2Disk() error = %v\n", err)
}
} | readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
readBackOwnedContainers := readObj.(map[string]int) | random_line_split |
dockerapi.go | // Package dockerapi is the facade to the Docker remote api.
package dockerapi
import (
"sync"
"context"
"fmt"
"io"
"log"
"os"
"tlex/mapsi2disk"
"golang.org/x/sync/errgroup"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/term"
"github.com/docker/go-connections/nat"
)
const containerRunningStateString string = "running"
// OwnedContainers contains the container IDs created by this process, mapped to their host ports.
type OwnedContainers map[string]int
// ContainerReaderStream contains a container reader stream, host port mapped to the container http server.
type ContainerReaderStream struct {
ReaderStream io.ReadCloser
HostPort int
}
// Clean up previously owned live instances that might have been left hanging.
func RemoveLiveContainersFromPreviousRun() {
readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
if err == nil {
// Only assert the concrete type after the error check; readObj may be nil on failure.
readBackOwnedContainers := readObj.(map[string]int)
defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
dockerClient := GetDockerClient()
for containerID := range readBackOwnedContainers {
log.Printf("Deleting container: %v from previous launch.\n", containerID)
err = dockerClient.ContainerStop(context.Background(), containerID, nil)
}
}
}
// GetDockerClient returns a docker remote api client handle value foundational to all Docker remote api interactions.
// Upon error it panics.
// This process creates a docker client when launching and holds on to it for all API interactions.
// This should be contrasted with the stateless approach of requesting a new client for any API interaction.
func GetDockerClient() *client.Client {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Panicf("Docker client.NewClientWithOpts error: %s\n", err)
}
dockerClient.NegotiateAPIVersion(ctx)
return dockerClient
}
// BuildDockerImage builds a Docker Image for a given dockerFilePath located in the same folder
// of the running process.
// Upon error, it exits the process.
func BuildDockerImage(dockerClient *client.Client, dockerFilePath string) {
tarDockerfileReader, err := archive.TarWithOptions(dockerFilePath, &archive.TarOptions{})
if err != nil {
log.Fatal(err, " :unable to create tar with Dockerfile")
}
log.Printf("Building Docker Image in %q\n", dockerFilePath)
options := types.ImageBuildOptions{
SuppressOutput: false,
Remove: true,
ForceRemove: true,
PullParent: true,
Tags: []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerLogStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
}
containerLogStream := ContainerReaderStream{readerStream, hostPort}
containerLogStreams = append(containerLogStreams, containerLogStream)
}
}
return containerLogStreams
}
// getContainers lists all the containers running on host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Printf("Unable to list containers: %v", err)
return nil, err
}
return containers, nil
}
// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails and already-launched instances have to be cleaned up.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if _, ok := owned[container.ID]; ok {
err = dockerClient.ContainerStop(context.Background(), container.ID, nil)
}
}
}
}
// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if owned[container.ID] > 0 && container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
return nil
}
// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended number of containers is live; otherwise it panics.
// This is called by tests. AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers |
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone checks that no containers exist and that the system cleaned up; otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert owned containers are not running
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
dockerClient = GetDockerClient()
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
defer terminatorGroup.Done()
// Use a goroutine-local err so concurrent stops do not race on a shared variable.
if err := dockerClient.ContainerStop(context.Background(), contID, nil); err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
}()
}
}
}
}
// createContainer creates a new container for the dockerImageName
// at the container httpServerContainerPort value.
// and at the host httpServerHostPort value.
// Returns the new container's struct abstraction, error.
// Upon error it panics.
// Credit: https://medium.com/tarkalabs/controlling-the-docker-engine-in-go-826012f9671c
func createContainer(dockerClient *client.Client, dockerImageName string, httpServerContainerPort int, httpServerHostPort int) (container.ContainerCreateCreatedBody, error) {
hostBinding := nat.PortBinding{
HostIP: "0.0.0.0",
HostPort: fmt.Sprintf("%d", httpServerHostPort),
}
containerPort, err := nat.NewPort("tcp", fmt.Sprintf("%d", httpServerContainerPort))
if err != nil {
log.Panicf("Unable to create a tcp httpServerContainerPort %d\n", httpServerContainerPort)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
containerBody, err := dockerClient.ContainerCreate(context.Background(),
&container.Config{Image: dockerImageName},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: true,
},
nil,
fmt.Sprintf("HttpServerAt_%d", httpServerHostPort))
if err != nil {
log.Panicf("ContainerCreate failed for the image: %s, host port: %d with error: %s\n", dockerImageName, httpServerContainerPort, err)
}
return containerBody, err
}
// setContainerLive starts a created container in active live state.
func setContainerLive(dockerClient *client.Client, containerID string) (string, error) {
err := dockerClient.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
return containerID, err
}
// setNewContainerLive
// 1. creates a new container for the given imageName and
// 2. starts it into an active live state:
// at the container httpServerContainerPort value,
// and at the host httpServerHostPort value.
// Returns the new container ID, error.
func setNewContainerLive(dockerClient *client.Client, imageName string, httpServerContainerPort int, httpServerHostPort int) (string, error) {
cont, err := createContainer(dockerClient, imageName, httpServerContainerPort, httpServerHostPort)
containerID, err := setContainerLive(dockerClient, cont.ID)
if err != nil {
log.Printf("ContainerStart failed for the image: %s, host port: %d with error: %s\n", imageName, httpServerContainerPort, err)
return "", err
}
log.Printf("Container %s with host port %d is live.\n", containerID, httpServerHostPort)
return containerID, err
}
// CreateContainers requests live containers. It creates and starts them into an active live state for the given dockerImageName,
// listening inside each container on containerListeningPort
// and on the host at consecutive ports counting up from startingListeningHostPort.
func (owned OwnedContainers) CreateContainers(launcherGroup *errgroup.Group, requestedLiveContainers int, dockerClient *client.Client, dockerImageName string, startingListeningHostPort int, containerListeningPort int) {
// Manage concurrent access to shared owned map
ownedMutex := &sync.Mutex{}
for i := 0; i < requestedLiveContainers; i++ {
// necessary to capture each loop iteration of i
portCounter := i
// Concurrent launching of docker instances
launcherGroup.Go(func() error {
hostPort := startingListeningHostPort + portCounter
containerID, err := setNewContainerLive(dockerClient, dockerImageName, containerListeningPort, hostPort)
if err != nil {
log.Printf("ContainerCreate failed for the image: %s, host port: %d with error:%s\n", dockerImageName, hostPort, err)
} else {
ownedMutex.Lock()
owned[containerID] = hostPort
ownedMutex.Unlock()
}
return err
})
}
}
// PersistOpenContainerIDs saves the presumably populated owned containers map (ID -> host port) to the filesystem.
func (owned OwnedContainers) PersistOpenContainerIDs() {
mapToSave := map[string]int(owned)
err := mapsi2disk.SaveContainerPorts2Disk(mapsi2disk.GobFilename, &mapToSave)
if err != nil {
log.Printf("SaveContainerPorts2Disk() error = %v\n", err)
}
} | {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
} | conditional_block |
set6.go | package main
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"math/big"
"os"
"strings"
"time"
"github.com/fatih/color"
"github.com/jgblight/matasano/pkg/ciphers"
"github.com/jgblight/matasano/pkg/hashes"
"github.com/jgblight/matasano/pkg/secrets"
"github.com/jgblight/matasano/pkg/utils"
)
const (
dataDir = "/Users/jennifer/go/src/github.com/jgblight/matasano/data/"
)
func problemOne() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
message, err := secrets.GetClientMessage(server)
if err != nil {
return err
}
fmt.Printf("Original Ciphertext: %s\n", message)
c, err := utils.HexToBigint(message)
if err != nil {
return err
}
s := utils.GetBigInt(2)
cPrime := new(big.Int).Exp(s, server.E, server.N)
cPrime.Mul(cPrime, c)
cPrime.Mod(cPrime, server.N)
cHex := hex.EncodeToString(cPrime.Bytes())
fmt.Printf("Modified Ciphertext: %s\n", cHex)
plaintext, err := server.DecryptMessage(cHex)
if err != nil {
return err
}
pPrime := new(big.Int).SetBytes(plaintext)
p, err := ciphers.InvMod(s, server.N)
if err != nil {
return err
}
p.Mul(p, pPrime)
p.Mod(p, server.N)
fmt.Printf(" Recovered Message: %q\n", p.Bytes())
return nil
}
func cubeRoot(n *big.Int) *big.Int {
xN := utils.GetBigInt(1)
diff := utils.GetBigInt(5)
approx := utils.GetBigInt(0)
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
for diff.CmpAbs(approx) == 1 {
t := new(big.Int)
t = t.Div(n, t.Mul(xN, xN))
xN1 := new(big.Int)
xN1 = xN1.Div(xN1.Add(xN1.Mul(two, xN), t), three)
diff = diff.Sub(xN, xN1)
xN = xN1.Set(xN1)
}
return xN
}
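// cubeRoot is a plain integer Newton iteration, x_{n+1} = (2*x_n + n/x_n^2) / 3, used by
// problemTwo to take the cube root of a forged padded block (possible because e = 3).
// A minimal sanity-check sketch; the test value 12345 is an arbitrary assumption and
// the result is only expected to land at or very near the true root:
func cubeRootExample() {
	n := new(big.Int).Exp(utils.GetBigInt(12345), utils.GetBigInt(3), nil)
	fmt.Printf("cube root of 12345^3: %v\n", cubeRoot(n))
}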
func problemTwo() error {
e, d, n, err := ciphers.RSAKeygen(1024)
if err != nil {
return err
}
plaintext := []byte("hi mom")
signature, err := ciphers.PKCS15Sign(plaintext, d, n)
if err != nil {
return err
}
fmt.Printf("Valid Signature: %s\n", signature)
verified := ciphers.PKCS15Verify(plaintext, signature, e, n)
fmt.Printf("Verified: %t\n", verified)
hash, err := hex.DecodeString(hashes.SHA1(plaintext))
if err != nil {
return err
}
padding := utils.MakeRepeatChar('\xff', 10)
padded := append([]byte("\x00\x01"), padding...)
padded = append(padded, '\x00')
padded = append(padded, hash...)
padded = append(padded, utils.MakeRepeatChar('\x00', 95)...)
x := new(big.Int).SetBytes(padded)
y := cubeRoot(x)
y = y.Add(y, utils.GetBigInt(1)) // overestimation > underestimation
forgery := hex.EncodeToString(y.Bytes())
fmt.Printf("Forged Signature: %s\n", forgery)
verified = ciphers.PKCS15Verify(plaintext, forgery, e, n)
fmt.Printf("Verified: %t\n", verified)
return nil
}
func recoverKey(k, H, r, s, q *big.Int) *big.Int {
x := new(big.Int)
r1 := new(big.Int).ModInverse(r, q)
x = x.Mod(x.Mul(x.Sub(x.Mul(s, k), H), r1), q)
return x
}
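// Derivation note: DSA signing computes s = k^-1 * (H(m) + x*r) mod q, so when the
// per-message nonce k is known or guessable the private key follows directly as
//   x = r^-1 * (s*k - H(m)) mod q
// which is exactly the expression recoverKey evaluates for problemThree and problemFour.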
func problemThree() error {
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
m := []byte("I'm a string")
fmt.Printf("DSA string: %q\n", m)
signature, err := ciphers.DSASign(m, x, params)
if err != nil {
return err
}
fmt.Printf("DSA signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(m, signature, y, params)
if err != nil {
return err
}
fmt.Printf("Verified: %v\n\n", verified)
weakR, _ := new(big.Int).SetString("548099063082341131477253921760299949438196259240", 10)
weakS, _ := new(big.Int).SetString("857042759984254168557880549501802188789837994940", 10)
message := []byte("For those that envy a MC it can be hazardous to your health\nSo be friendly, a matter of life and death, just like a etch-a-sketch\n")
hash, err := utils.HexToBigint(hashes.SHA1(message))
if err != nil {
return err
}
k := new(big.Int)
for i := 1; i <= 65536; i++ {
k = k.SetInt64(int64(i))
r := new(big.Int)
r = r.Mod(r.Exp(params.G, k, params.P), params.Q)
if r.Cmp(weakR) == 0 {
break
}
}
privateKey := recoverKey(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := []string{}
f, err := os.Open(dataDir + input)
if err != nil {
return err
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
str := scanner.Text()
subs := strings.SplitN(str, ": ", 2)
if subs[0] == "m" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 16)
ms = append(ms, n)
} else if subs[0] == "r" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
rs = append(rs, n)
} else if subs[0] == "s" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
ss = append(ss, n)
} else {
msgs = append(msgs, subs[1])
}
}
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
msgCount := len(ms)
k := new(big.Int)
found := false
for i := 0; i < msgCount; i++ {
for j := i; j < msgCount; j++ {
if i == j {
continue
}
if rs[i].Cmp(rs[j]) == 0 {
num := new(big.Int).Sub(ms[i], ms[j])
den := new(big.Int)
den = den.ModInverse(den.Sub(ss[i], ss[j]), params.Q)
k = k.Mod(k.Mul(num, den), params.Q)
hash, err := utils.HexToBigint(hashes.SHA1([]byte(msgs[i])))
if err != nil {
return err
}
privateKey := recoverKey(k, hash, rs[i], ss[i], params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "ca8f6f7c66fa362d40760d135b763eb8527d3d52" {
fmt.Printf("Found key: %v\n", privateKey)
found = true
break
}
}
}
if found {
break
}
}
return nil
}
func problemFive() error {
hello := []byte("Hello, world")
goodbye := []byte("Goodbye, world")
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
params.G = utils.GetBigInt(0)
signature, err := ciphers.DSASign(hello, x, params)
if err != nil {
return err
}
fmt.Println("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
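// rsaMul relies on the multiplicative homomorphism of textbook RSA:
//   c * s^e mod n = (m^e)(s^e) mod n = (m*s)^e mod n
// so multiplying a ciphertext by s^e yields a valid encryption of m*s without the
// private key. problemOne uses this identity directly; problemSix and the
// Bleichenbacher attack below use it to steer the parity/padding oracles.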
func problemSix() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage2(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
bits := server.N.BitLen()
lowerBound := utils.GetBigInt(0)
upperBound := new(big.Int).Set(server.N)
multiplier := utils.GetBigInt(2)
two := utils.GetBigInt(2)
printInline(upperBound.Bytes())
for i := 0; i < bits; i++ {
even, err := server.CheckIsEven(rsaMul(c, multiplier, server))
if err != nil |
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
type Interval struct {
Lower *big.Int
Upper *big.Int
B *big.Int
TwoB *big.Int
ThreeB *big.Int
ThreeBSub1 *big.Int
}
func initialInterval(n *big.Int) *Interval {
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
B := new(big.Int).Exp(two, utils.GetBigInt(n.BitLen()-16), nil)
TwoB := new(big.Int).Mul(two, B)
ThreeB := new(big.Int).Mul(three, B)
ThreeBSub1 := new(big.Int).Sub(ThreeB, utils.GetBigInt(1))
return &Interval{TwoB, ThreeBSub1, B, TwoB, ThreeB, ThreeBSub1}
}
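// initialInterval encodes step 1 of Bleichenbacher's attack: a PKCS#1 v1.5 conforming
// block starts with the bytes 0x00 0x02, so the plaintext m must satisfy
//   2B <= m <= 3B - 1, with B = 2^(8*(k-2)) for a k-byte modulus.
// n.BitLen()-16 expresses the same exponent, assuming the modulus bit length is a
// whole multiple of eight.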
func searchS(s0, c *big.Int, server *secrets.RSAServer) (*big.Int, error) {
s1 := new(big.Int).Set(s0)
one := utils.GetBigInt(1)
var err error
valid := false
for !valid {
c1 := rsaMul(c, s1, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, err
}
if valid {
return s1, nil
}
s1 = s1.Add(s1, one)
}
return s1, nil
}
func searchRS(s0, c *big.Int, interval *Interval, server *secrets.RSAServer) (*big.Int, *big.Int, error) {
one := utils.GetBigInt(1)
r := new(big.Int)
r = ceilDiv(r.Mul(utils.GetBigInt(2), r.Sub(r.Mul(interval.Upper, s0), interval.TwoB)), server.N)
s := new(big.Int)
minS := new(big.Int)
maxS := new(big.Int)
var err error
valid := false
for r.Cmp(server.N) == -1 {
rn := new(big.Int).Mul(r, server.N)
minS = minS.Div(minS.Add(interval.TwoB, rn), interval.Upper)
maxS = maxS.Div(maxS.Add(interval.ThreeB, rn), interval.Lower)
for s.Set(minS); s.Cmp(maxS) == -1; s.Add(s, one) {
c1 := rsaMul(c, s, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, nil, err
}
if valid {
return r, s, nil
}
}
r = r.Add(r, one)
}
return nil, nil, errors.New("could not find parameters")
}
func nextInterval(interval *Interval, s, r, n *big.Int) *Interval {
rn := new(big.Int).Mul(r, n)
a1 := new(big.Int)
a1 = ceilDiv(a1.Add(interval.TwoB, rn), s)
b1 := new(big.Int)
b1 = b1.Div(b1.Add(interval.ThreeBSub1, rn), s)
var newInt Interval
newInt = *interval
if interval.Lower.Cmp(a1) == -1 {
newInt.Lower = a1
}
if interval.Upper.Cmp(b1) == 1 {
newInt.Upper = b1
}
return &newInt
}
func ceilDiv(x, y *big.Int) *big.Int {
mod := new(big.Int)
zero := utils.GetBigInt(0)
z, mod := new(big.Int).DivMod(x, y, mod)
if mod.Cmp(zero) != 0 {
z = z.Add(z, utils.GetBigInt(1))
}
return z
}
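// allIntervals (step 3) rebuilds the set of candidate intervals for the latest conforming s, trying every feasible r for each interval.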
func allIntervals(currentSet []*Interval, s, n *big.Int) []*Interval {
one := utils.GetBigInt(1)
newSet := []*Interval{}
for i := 0; i < len(currentSet); i++ {
bounds := currentSet[i]
minBound := new(big.Int)
minBound = ceilDiv(minBound.Sub(minBound.Mul(bounds.Lower, s), bounds.ThreeBSub1), n)
maxBound := new(big.Int)
maxBound = maxBound.Div(maxBound.Sub(maxBound.Mul(bounds.Upper, s), bounds.TwoB), n)
r := minBound
for r.Cmp(maxBound) != 1 {
next := nextInterval(bounds, s, r, n)
newSet = append(newSet, next)
r = r.Add(r, one)
}
}
return newSet
}
func printIntervals(set []*Interval) {
printInline(set[0].Lower.Bytes())
}
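// bleichbacherAttack runs Bleichenbacher's 1998 PKCS#1 v1.5 padding-oracle attack: it repeatedly finds conforming multipliers s and narrows the candidate intervals until a single value, the padded plaintext, remains.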
func bleichbacherAttack(c *big.Int, server *secrets.RSAServer) ([]byte, error) {
one := utils.GetBigInt(1)
bounds := initialInterval(server.N)
minS := new(big.Int)
minS = minS.Div(server.N, bounds.ThreeB)
s, err := searchS(minS, c, server)
if err != nil {
return nil, err
}
intervalSet := []*Interval{bounds}
for i := 0; i < 5000; i++ {
intervalSet = allIntervals(intervalSet, s, server.N)
for j := 0; j < len(intervalSet); j++ {
if intervalSet[j].Lower.Cmp(intervalSet[j].Upper) == 0 {
return intervalSet[j].Lower.Bytes(), nil
}
}
printIntervals(intervalSet)
if len(intervalSet) > 1 {
minS = minS.Add(s, one)
s, err = searchS(minS, c, server)
if err != nil {
return nil, err
}
} else {
_, s, err = searchRS(s, c, bounds, server)
if err != nil {
return nil, err
}
}
}
return nil, errors.New("Found nothing")
}
func removePadding(plaintext []byte) []byte {
var i int
for i = 2; i < len(plaintext); i++ {
if plaintext[i] == '\x00' {
break
}
}
return plaintext[i+1:]
}
func problemSeven() error {
server, err := secrets.NewRSAServer(256)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage3(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func problemEight() error {
server, err := secrets.NewRSAServer(768)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage4(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func main() {
header := color.New(color.FgCyan, color.Bold)
header.Println("Problem 1: Unpadded Message Recovery Oracle")
err := problemOne()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 2: Bleichenbacher's e=3 RSA Attack")
err = problemTwo()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 3: DSA key recovery from nonce")
err = problemThree()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 4: DSA key recovery from repeated nonce")
err = problemFour("set6challenge44.txt")
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 5: DSA parameter tampering")
err = problemFive()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 6: RSA parity oracle")
err = problemSix()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 7: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 1)")
err = problemSeven()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 8: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 2)")
err = problemEight()
if err != nil {
fmt.Println(err)
}
fmt.Println()
}
| {
return err
} | conditional_block |
set6.go | package main
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"math/big"
"os"
"strings"
"time"
"github.com/fatih/color"
"github.com/jgblight/matasano/pkg/ciphers"
"github.com/jgblight/matasano/pkg/hashes"
"github.com/jgblight/matasano/pkg/secrets"
"github.com/jgblight/matasano/pkg/utils"
)
const (
dataDir = "/Users/jennifer/go/src/github.com/jgblight/matasano/data/"
)
func problemOne() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
message, err := secrets.GetClientMessage(server)
if err != nil {
return err
}
fmt.Printf("Original Ciphertext: %s\n", message)
c, err := utils.HexToBigint(message)
if err != nil {
return err
}
s := utils.GetBigInt(2)
cPrime := new(big.Int).Exp(s, server.E, server.N)
cPrime.Mul(cPrime, c)
cPrime.Mod(cPrime, server.N)
cHex := hex.EncodeToString(cPrime.Bytes())
fmt.Printf("Modified Ciphertext: %s\n", cHex)
plaintext, err := server.DecryptMessage(cHex)
if err != nil {
return err
}
pPrime := new(big.Int).SetBytes(plaintext)
p, err := ciphers.InvMod(s, server.N)
if err != nil {
return err
}
p.Mul(p, pPrime)
p.Mod(p, server.N)
fmt.Printf(" Recovered Message: %q\n", p.Bytes())
return nil
}
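// cubeRoot computes an integer cube root of n with Newton's method, iterating x' = (2x + n/x^2)/3 until successive estimates converge.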
func cubeRoot(n *big.Int) *big.Int {
xN := utils.GetBigInt(1)
diff := utils.GetBigInt(5)
approx := utils.GetBigInt(0)
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
for diff.CmpAbs(approx) == 1 {
t := new(big.Int)
t = t.Div(n, t.Mul(xN, xN))
xN1 := new(big.Int)
xN1 = xN1.Div(xN1.Add(xN1.Mul(two, xN), t), three)
diff = diff.Sub(xN, xN1)
xN = xN1.Set(xN1)
}
return xN
}
func problemTwo() error {
e, d, n, err := ciphers.RSAKeygen(1024)
if err != nil {
return err
}
plaintext := []byte("hi mom")
signature, err := ciphers.PKCS15Sign(plaintext, d, n)
if err != nil {
return err
}
fmt.Printf("Valid Signature: %s\n", signature)
verified := ciphers.PKCS15Verify(plaintext, signature, e, n)
fmt.Printf("Verified: %t\n", verified)
hash, err := hex.DecodeString(hashes.SHA1(plaintext))
if err != nil {
return err
}
padding := utils.MakeRepeatChar('\xff', 10)
padded := append([]byte("\x00\x01"), padding...)
padded = append(padded, '\x00')
padded = append(padded, hash...)
padded = append(padded, utils.MakeRepeatChar('\x00', 95)...)
x := new(big.Int).SetBytes(padded)
y := cubeRoot(x)
y = y.Add(y, utils.GetBigInt(1)) // overestimation > underestimation
forgery := hex.EncodeToString(y.Bytes())
fmt.Printf("Forged Signature: %s\n", forgery)
verified = ciphers.PKCS15Verify(plaintext, forgery, e, n)
fmt.Printf("Verified: %t\n", verified)
return nil
}
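// recoverKey derives the DSA private key x = (s*k - H(m)) * r^-1 mod q once the per-message nonce k is known.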
func recoverKey(k, H, r, s, q *big.Int) *big.Int {
x := new(big.Int)
r1 := new(big.Int).ModInverse(r, q)
x = x.Mod(x.Mul(x.Sub(x.Mul(s, k), H), r1), q)
return x
}
func problemThree() error {
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
m := []byte("I'm a string")
fmt.Printf("DSA string: %q\n", m)
signature, err := ciphers.DSASign(m, x, params)
if err != nil {
return err
}
fmt.Printf("DSA signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(m, signature, y, params)
if err != nil {
return err
}
fmt.Printf("Verified: %v\n\n", verified)
weakR, _ := new(big.Int).SetString("548099063082341131477253921760299949438196259240", 10)
weakS, _ := new(big.Int).SetString("857042759984254168557880549501802188789837994940", 10)
message := []byte("For those that envy a MC it can be hazardous to your health\nSo be friendly, a matter of life and death, just like a etch-a-sketch\n")
hash, err := utils.HexToBigint(hashes.SHA1(message))
if err != nil {
return err
}
k := new(big.Int)
for i := 1; i <= 65536; i++ {
k = k.SetInt64(int64(i))
r := new(big.Int)
r = r.Mod(r.Exp(params.G, k, params.P), params.Q)
if r.Cmp(weakR) == 0 {
break
}
}
privateKey := recoverKey(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := []string{}
f, err := os.Open(dataDir + input)
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
str := scanner.Text()
subs := strings.SplitN(str, ": ", 2)
if subs[0] == "m" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 16)
ms = append(ms, n)
} else if subs[0] == "r" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
rs = append(rs, n)
} else if subs[0] == "s" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
ss = append(ss, n)
} else {
msgs = append(msgs, subs[1])
}
}
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
msgCount := len(ms)
k := new(big.Int)
found := false
for i := 0; i < msgCount; i++ {
for j := i; j < msgCount; j++ {
if i == j {
continue
}
if rs[i].Cmp(rs[j]) == 0 {
num := new(big.Int).Sub(ms[i], ms[j])
den := new(big.Int)
den = den.ModInverse(den.Sub(ss[i], ss[j]), params.Q)
k = k.Mod(k.Mul(num, den), params.Q)
hash, err := utils.HexToBigint(hashes.SHA1([]byte(msgs[i])))
if err != nil {
return err
}
privateKey := recoverKey(k, hash, rs[i], ss[i], params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "ca8f6f7c66fa362d40760d135b763eb8527d3d52" {
fmt.Printf("Found key: %v\n", privateKey)
found = true
break
}
}
}
if found {
break
}
}
return nil
}
func problemFive() error {
hello := []byte("Hello, world")
goodbye := []byte("Goodbye, world")
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
params.G = utils.GetBigInt(0)
signature, err := ciphers.DSASign(hello, x, params)
if err != nil {
return err
}
fmt.Println("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
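// rsaMul multiplies the plaintext hidden inside ciphertext c by 'multiplier' without decrypting: it returns hex(c * multiplier^e mod n), using RSA's multiplicative homomorphism.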
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
func problemSix() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage2(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
bits := server.N.BitLen()
lowerBound := utils.GetBigInt(0)
upperBound := new(big.Int).Set(server.N)
multiplier := utils.GetBigInt(2)
two := utils.GetBigInt(2)
printInline(upperBound.Bytes())
for i := 0; i < bits; i++ {
even, err := server.CheckIsEven(rsaMul(c, multiplier, server))
if err != nil {
return err
}
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
type Interval struct {
Lower *big.Int
Upper *big.Int
B *big.Int
TwoB *big.Int
ThreeB *big.Int
ThreeBSub1 *big.Int
}
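// initialInterval builds Bleichenbacher's starting interval M0 = [2B, 3B-1], where B = 2^(8(k-2)) for a k-byte modulus.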
func initialInterval(n *big.Int) *Interval {
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
B := new(big.Int).Exp(two, utils.GetBigInt(n.BitLen()-16), nil)
TwoB := new(big.Int).Mul(two, B)
ThreeB := new(big.Int).Mul(three, B)
ThreeBSub1 := new(big.Int).Sub(ThreeB, utils.GetBigInt(1))
return &Interval{TwoB, ThreeBSub1, B, TwoB, ThreeB, ThreeBSub1}
}
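// searchS (steps 2a/2b of Bleichenbacher's attack) finds the smallest s >= s0 such that c*s^e mod n is PKCS#1 v1.5 conforming.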
func searchS(s0, c *big.Int, server *secrets.RSAServer) (*big.Int, error) {
s1 := new(big.Int).Set(s0)
one := utils.GetBigInt(1)
var err error
valid := false
for !valid {
c1 := rsaMul(c, s1, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, err
}
if valid {
return s1, nil
}
s1 = s1.Add(s1, one)
}
return s1, nil
}
func searchRS(s0, c *big.Int, interval *Interval, server *secrets.RSAServer) (*big.Int, *big.Int, error) |
func nextInterval(interval *Interval, s, r, n *big.Int) *Interval {
rn := new(big.Int).Mul(r, n)
a1 := new(big.Int)
a1 = ceilDiv(a1.Add(interval.TwoB, rn), s)
b1 := new(big.Int)
b1 = b1.Div(b1.Add(interval.ThreeBSub1, rn), s)
var newInt Interval
newInt = *interval
if interval.Lower.Cmp(a1) == -1 {
newInt.Lower = a1
}
if interval.Upper.Cmp(b1) == 1 {
newInt.Upper = b1
}
return &newInt
}
func ceilDiv(x, y *big.Int) *big.Int {
mod := new(big.Int)
zero := utils.GetBigInt(0)
z, mod := new(big.Int).DivMod(x, y, mod)
if mod.Cmp(zero) != 0 {
z = z.Add(z, utils.GetBigInt(1))
}
return z
}
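// allIntervals (step 3) rebuilds the set of candidate intervals for the latest conforming s, trying every feasible r for each interval.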
func allIntervals(currentSet []*Interval, s, n *big.Int) []*Interval {
one := utils.GetBigInt(1)
newSet := []*Interval{}
for i := 0; i < len(currentSet); i++ {
bounds := currentSet[i]
minBound := new(big.Int)
minBound = ceilDiv(minBound.Sub(minBound.Mul(bounds.Lower, s), bounds.ThreeBSub1), n)
maxBound := new(big.Int)
maxBound = maxBound.Div(maxBound.Sub(maxBound.Mul(bounds.Upper, s), bounds.TwoB), n)
r := minBound
for r.Cmp(maxBound) != 1 {
next := nextInterval(bounds, s, r, n)
newSet = append(newSet, next)
r = r.Add(r, one)
}
}
return newSet
}
func printIntervals(set []*Interval) {
printInline(set[0].Lower.Bytes())
}
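// bleichbacherAttack runs Bleichenbacher's 1998 PKCS#1 v1.5 padding-oracle attack: it repeatedly finds conforming multipliers s and narrows the candidate intervals until a single value, the padded plaintext, remains.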
func bleichbacherAttack(c *big.Int, server *secrets.RSAServer) ([]byte, error) {
one := utils.GetBigInt(1)
bounds := initialInterval(server.N)
minS := new(big.Int)
minS = minS.Div(server.N, bounds.ThreeB)
s, err := searchS(minS, c, server)
if err != nil {
return nil, err
}
intervalSet := []*Interval{bounds}
for i := 0; i < 5000; i++ {
intervalSet = allIntervals(intervalSet, s, server.N)
for j := 0; j < len(intervalSet); j++ {
if intervalSet[j].Lower.Cmp(intervalSet[j].Upper) == 0 {
return intervalSet[j].Lower.Bytes(), nil
}
}
printIntervals(intervalSet)
if len(intervalSet) > 1 {
minS = minS.Add(s, one)
s, err = searchS(minS, c, server)
if err != nil {
return nil, err
}
} else {
_, s, err = searchRS(s, c, bounds, server)
if err != nil {
return nil, err
}
}
}
return nil, errors.New("Found nothing")
}
func removePadding(plaintext []byte) []byte {
var i int
for i = 2; i < len(plaintext); i++ {
if plaintext[i] == '\x00' {
break
}
}
return plaintext[i+1:]
}
func problemSeven() error {
server, err := secrets.NewRSAServer(256)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage3(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func problemEight() error {
server, err := secrets.NewRSAServer(768)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage4(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func main() {
header := color.New(color.FgCyan, color.Bold)
header.Println("Problem 1: Unpadded Message Recovery Oracle")
err := problemOne()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 2: Bleichenbacher's e=3 RSA Attack")
err = problemTwo()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 3: DSA key recovery from nonce")
err = problemThree()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 4: DSA key recovery from repeated nonce")
err = problemFour("set6challenge44.txt")
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 5: DSA parameter tampering")
err = problemFive()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 6: RSA parity oracle")
err = problemSix()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 7: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 1)")
err = problemSeven()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 8: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 2)")
err = problemEight()
if err != nil {
fmt.Println(err)
}
fmt.Println()
}
| {
one := utils.GetBigInt(1)
r := new(big.Int)
r = ceilDiv(r.Mul(utils.GetBigInt(2), r.Sub(r.Mul(interval.Upper, s0), interval.TwoB)), server.N)
s := new(big.Int)
minS := new(big.Int)
maxS := new(big.Int)
var err error
valid := false
for r.Cmp(server.N) == -1 {
rn := new(big.Int).Mul(r, server.N)
minS = minS.Div(minS.Add(interval.TwoB, rn), interval.Upper)
maxS = maxS.Div(maxS.Add(interval.ThreeB, rn), interval.Lower)
for s.Set(minS); s.Cmp(maxS) == -1; s.Add(s, one) {
c1 := rsaMul(c, s, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, nil, err
}
if valid {
return r, s, nil
}
}
r = r.Add(r, one)
}
return nil, nil, errors.New("could not find parameters")
} | identifier_body |
set6.go | package main
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"math/big"
"os"
"strings"
"time"
"github.com/fatih/color"
"github.com/jgblight/matasano/pkg/ciphers"
"github.com/jgblight/matasano/pkg/hashes"
"github.com/jgblight/matasano/pkg/secrets"
"github.com/jgblight/matasano/pkg/utils"
)
const (
dataDir = "/Users/jennifer/go/src/github.com/jgblight/matasano/data/"
)
func problemOne() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
message, err := secrets.GetClientMessage(server)
if err != nil {
return err
}
fmt.Printf("Original Ciphertext: %s\n", message)
c, err := utils.HexToBigint(message)
if err != nil {
return err
}
s := utils.GetBigInt(2)
cPrime := new(big.Int).Exp(s, server.E, server.N)
cPrime.Mul(cPrime, c)
cPrime.Mod(cPrime, server.N)
cHex := hex.EncodeToString(cPrime.Bytes())
fmt.Printf("Modified Ciphertext: %s\n", cHex)
plaintext, err := server.DecryptMessage(cHex)
if err != nil {
return err
}
pPrime := new(big.Int).SetBytes(plaintext)
p, err := ciphers.InvMod(s, server.N)
if err != nil {
return err
}
p.Mul(p, pPrime)
p.Mod(p, server.N)
fmt.Printf(" Recovered Message: %q\n", p.Bytes())
return nil
}
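// cubeRoot computes an integer cube root of n with Newton's method, iterating x' = (2x + n/x^2)/3 until successive estimates converge.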
func cubeRoot(n *big.Int) *big.Int {
xN := utils.GetBigInt(1)
diff := utils.GetBigInt(5)
approx := utils.GetBigInt(0)
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
for diff.CmpAbs(approx) == 1 {
t := new(big.Int)
t = t.Div(n, t.Mul(xN, xN))
xN1 := new(big.Int)
xN1 = xN1.Div(xN1.Add(xN1.Mul(two, xN), t), three)
diff = diff.Sub(xN, xN1)
xN = xN1.Set(xN1)
}
return xN
}
func problemTwo() error {
e, d, n, err := ciphers.RSAKeygen(1024)
if err != nil {
return err
}
plaintext := []byte("hi mom")
signature, err := ciphers.PKCS15Sign(plaintext, d, n)
if err != nil {
return err
}
fmt.Printf("Valid Signature: %s\n", signature)
verified := ciphers.PKCS15Verify(plaintext, signature, e, n)
fmt.Printf("Verified: %t\n", verified)
hash, err := hex.DecodeString(hashes.SHA1(plaintext))
if err != nil {
return err
}
padding := utils.MakeRepeatChar('\xff', 10)
padded := append([]byte("\x00\x01"), padding...)
padded = append(padded, '\x00')
padded = append(padded, hash...)
padded = append(padded, utils.MakeRepeatChar('\x00', 95)...)
x := new(big.Int).SetBytes(padded)
y := cubeRoot(x)
y = y.Add(y, utils.GetBigInt(1)) // overestimation > underestimation
forgery := hex.EncodeToString(y.Bytes())
fmt.Printf("Forged Signature: %s\n", forgery)
verified = ciphers.PKCS15Verify(plaintext, forgery, e, n)
fmt.Printf("Verified: %t\n", verified)
return nil
}
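// recoverKey derives the DSA private key x = (s*k - H(m)) * r^-1 mod q once the per-message nonce k is known.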
func recoverKey(k, H, r, s, q *big.Int) *big.Int {
x := new(big.Int)
r1 := new(big.Int).ModInverse(r, q)
x = x.Mod(x.Mul(x.Sub(x.Mul(s, k), H), r1), q)
return x
}
func problemThree() error {
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
m := []byte("I'm a string")
fmt.Printf("DSA string: %q\n", m)
signature, err := ciphers.DSASign(m, x, params)
if err != nil {
return err
}
fmt.Printf("DSA signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(m, signature, y, params)
if err != nil {
return err
}
fmt.Printf("Verified: %v\n\n", verified)
weakR, _ := new(big.Int).SetString("548099063082341131477253921760299949438196259240", 10)
weakS, _ := new(big.Int).SetString("857042759984254168557880549501802188789837994940", 10)
message := []byte("For those that envy a MC it can be hazardous to your health\nSo be friendly, a matter of life and death, just like a etch-a-sketch\n")
hash, err := utils.HexToBigint(hashes.SHA1(message))
if err != nil {
return err
}
k := new(big.Int)
for i := 1; i <= 65536; i++ {
k = k.SetInt64(int64(i))
r := new(big.Int)
r = r.Mod(r.Exp(params.G, k, params.P), params.Q)
if r.Cmp(weakR) == 0 {
break
}
}
privateKey := recoverKey(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := []string{}
f, err := os.Open(dataDir + input)
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
str := scanner.Text()
subs := strings.SplitN(str, ": ", 2)
if subs[0] == "m" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 16)
ms = append(ms, n)
} else if subs[0] == "r" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
rs = append(rs, n)
} else if subs[0] == "s" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
ss = append(ss, n)
} else {
msgs = append(msgs, subs[1])
}
}
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
msgCount := len(ms)
k := new(big.Int)
found := false
for i := 0; i < msgCount; i++ {
for j := i; j < msgCount; j++ {
if i == j {
continue
}
if rs[i].Cmp(rs[j]) == 0 {
num := new(big.Int).Sub(ms[i], ms[j])
den := new(big.Int)
den = den.ModInverse(den.Sub(ss[i], ss[j]), params.Q)
k = k.Mod(k.Mul(num, den), params.Q)
hash, err := utils.HexToBigint(hashes.SHA1([]byte(msgs[i])))
if err != nil {
return err
}
privateKey := recoverKey(k, hash, rs[i], ss[i], params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "ca8f6f7c66fa362d40760d135b763eb8527d3d52" {
fmt.Printf("Found key: %v\n", privateKey)
found = true
break
}
}
}
if found {
break
}
}
return nil
}
func problemFive() error {
hello := []byte("Hello, world")
goodbye := []byte("Goodbye, world")
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
params.G = utils.GetBigInt(0)
signature, err := ciphers.DSASign(hello, x, params)
if err != nil {
return err
}
fmt.Println("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
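// rsaMul multiplies the plaintext hidden inside ciphertext c by 'multiplier' without decrypting: it returns hex(c * multiplier^e mod n), using RSA's multiplicative homomorphism.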
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
func problemSix() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage2(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
bits := server.N.BitLen()
lowerBound := utils.GetBigInt(0)
upperBound := new(big.Int).Set(server.N)
multiplier := utils.GetBigInt(2)
two := utils.GetBigInt(2)
printInline(upperBound.Bytes())
for i := 0; i < bits; i++ {
even, err := server.CheckIsEven(rsaMul(c, multiplier, server))
if err != nil {
return err
}
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
type Interval struct {
Lower *big.Int
Upper *big.Int
B *big.Int
TwoB *big.Int
ThreeB *big.Int
ThreeBSub1 *big.Int
}
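// initialInterval builds Bleichenbacher's starting interval M0 = [2B, 3B-1], where B = 2^(8(k-2)) for a k-byte modulus.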
func initialInterval(n *big.Int) *Interval {
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
B := new(big.Int).Exp(two, utils.GetBigInt(n.BitLen()-16), nil)
TwoB := new(big.Int).Mul(two, B)
ThreeB := new(big.Int).Mul(three, B)
ThreeBSub1 := new(big.Int).Sub(ThreeB, utils.GetBigInt(1))
return &Interval{TwoB, ThreeBSub1, B, TwoB, ThreeB, ThreeBSub1}
}
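// searchS (steps 2a/2b of Bleichenbacher's attack) finds the smallest s >= s0 such that c*s^e mod n is PKCS#1 v1.5 conforming.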
func searchS(s0, c *big.Int, server *secrets.RSAServer) (*big.Int, error) {
s1 := new(big.Int).Set(s0)
one := utils.GetBigInt(1)
var err error
valid := false
for !valid {
c1 := rsaMul(c, s1, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, err
}
if valid {
return s1, nil
}
s1 = s1.Add(s1, one)
}
return s1, nil
}
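// searchRS (step 2c) handles the single-interval case [a, b]: it scans r >= 2*(b*s0 - 2B)/n and s in [(2B+r*n)/b, (3B+r*n)/a) for the next conforming multiplier.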
func searchRS(s0, c *big.Int, interval *Interval, server *secrets.RSAServer) (*big.Int, *big.Int, error) {
one := utils.GetBigInt(1)
r := new(big.Int)
r = ceilDiv(r.Mul(utils.GetBigInt(2), r.Sub(r.Mul(interval.Upper, s0), interval.TwoB)), server.N)
s := new(big.Int)
minS := new(big.Int)
maxS := new(big.Int)
var err error
valid := false
for r.Cmp(server.N) == -1 {
rn := new(big.Int).Mul(r, server.N)
minS = minS.Div(minS.Add(interval.TwoB, rn), interval.Upper)
maxS = maxS.Div(maxS.Add(interval.ThreeB, rn), interval.Lower)
for s.Set(minS); s.Cmp(maxS) == -1; s.Add(s, one) {
c1 := rsaMul(c, s, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, nil, err
}
if valid {
return r, s, nil
}
}
r = r.Add(r, one)
}
return nil, nil, errors.New("could not find parameters")
}
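// nextInterval (step 3) intersects the interval [a, b] with [ceil((2B+r*n)/s), floor((3B-1+r*n)/s)].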
func nextInterval(interval *Interval, s, r, n *big.Int) *Interval {
rn := new(big.Int).Mul(r, n)
a1 := new(big.Int)
a1 = ceilDiv(a1.Add(interval.TwoB, rn), s)
b1 := new(big.Int)
b1 = b1.Div(b1.Add(interval.ThreeBSub1, rn), s)
var newInt Interval
newInt = *interval
if interval.Lower.Cmp(a1) == -1 {
newInt.Lower = a1
}
if interval.Upper.Cmp(b1) == 1 {
newInt.Upper = b1
}
return &newInt
}
func ceilDiv(x, y *big.Int) *big.Int {
mod := new(big.Int)
zero := utils.GetBigInt(0)
z, mod := new(big.Int).DivMod(x, y, mod)
if mod.Cmp(zero) != 0 {
z = z.Add(z, utils.GetBigInt(1))
}
return z
}
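// allIntervals (step 3) rebuilds the set of candidate intervals for the latest conforming s, trying every feasible r for each interval.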
func allIntervals(currentSet []*Interval, s, n *big.Int) []*Interval {
one := utils.GetBigInt(1)
newSet := []*Interval{}
for i := 0; i < len(currentSet); i++ {
bounds := currentSet[i]
minBound := new(big.Int)
minBound = ceilDiv(minBound.Sub(minBound.Mul(bounds.Lower, s), bounds.ThreeBSub1), n)
maxBound := new(big.Int)
maxBound = maxBound.Div(maxBound.Sub(maxBound.Mul(bounds.Upper, s), bounds.TwoB), n)
r := minBound
for r.Cmp(maxBound) != 1 {
next := nextInterval(bounds, s, r, n)
newSet = append(newSet, next)
r = r.Add(r, one)
}
}
return newSet
}
func printIntervals(set []*Interval) {
printInline(set[0].Lower.Bytes())
}
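// bleichbacherAttack runs Bleichenbacher's 1998 PKCS#1 v1.5 padding-oracle attack: it repeatedly finds conforming multipliers s and narrows the candidate intervals until a single value, the padded plaintext, remains.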
func bleichbacherAttack(c *big.Int, server *secrets.RSAServer) ([]byte, error) {
one := utils.GetBigInt(1)
bounds := initialInterval(server.N)
minS := new(big.Int)
minS = minS.Div(server.N, bounds.ThreeB)
s, err := searchS(minS, c, server)
if err != nil {
return nil, err
}
intervalSet := []*Interval{bounds}
for i := 0; i < 5000; i++ {
intervalSet = allIntervals(intervalSet, s, server.N)
for j := 0; j < len(intervalSet); j++ {
if intervalSet[j].Lower.Cmp(intervalSet[j].Upper) == 0 {
return intervalSet[j].Lower.Bytes(), nil
}
}
printIntervals(intervalSet)
if len(intervalSet) > 1 {
minS = minS.Add(s, one)
s, err = searchS(minS, c, server)
if err != nil {
return nil, err
} | }
}
}
return nil, errors.New("Found nothing")
}
func removePadding(plaintext []byte) []byte {
var i int
for i = 2; i < len(plaintext); i++ {
if plaintext[i] == '\x00' {
break
}
}
return plaintext[i+1:]
}
func problemSeven() error {
server, err := secrets.NewRSAServer(256)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage3(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func problemEight() error {
server, err := secrets.NewRSAServer(768)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage4(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func main() {
header := color.New(color.FgCyan, color.Bold)
header.Println("Problem 1: Unpadded Message Recovery Oracle")
err := problemOne()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 2: Bleichenbacher's e=3 RSA Attack")
err = problemTwo()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 3: DSA key recovery from nonce")
err = problemThree()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 4: DSA key recovery from repeated nonce")
err = problemFour("set6challenge44.txt")
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 5: DSA parameter tampering")
err = problemFive()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 6: RSA parity oracle")
err = problemSix()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 7: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 1)")
err = problemSeven()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 8: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 2)")
err = problemEight()
if err != nil {
fmt.Println(err)
}
fmt.Println()
} | } else {
_, s, err = searchRS(s, c, bounds, server)
if err != nil {
return nil, err | random_line_split |
set6.go | package main
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"math/big"
"os"
"strings"
"time"
"github.com/fatih/color"
"github.com/jgblight/matasano/pkg/ciphers"
"github.com/jgblight/matasano/pkg/hashes"
"github.com/jgblight/matasano/pkg/secrets"
"github.com/jgblight/matasano/pkg/utils"
)
const (
dataDir = "/Users/jennifer/go/src/github.com/jgblight/matasano/data/"
)
func problemOne() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
message, err := secrets.GetClientMessage(server)
if err != nil {
return err
}
fmt.Printf("Original Ciphertext: %s\n", message)
c, err := utils.HexToBigint(message)
if err != nil {
return err
}
s := utils.GetBigInt(2)
cPrime := new(big.Int).Exp(s, server.E, server.N)
cPrime.Mul(cPrime, c)
cPrime.Mod(cPrime, server.N)
cHex := hex.EncodeToString(cPrime.Bytes())
fmt.Printf("Modified Ciphertext: %s\n", cHex)
plaintext, err := server.DecryptMessage(cHex)
if err != nil {
return err
}
pPrime := new(big.Int).SetBytes(plaintext)
p, err := ciphers.InvMod(s, server.N)
if err != nil {
return err
}
p.Mul(p, pPrime)
p.Mod(p, server.N)
fmt.Printf(" Recovered Message: %q\n", p.Bytes())
return nil
}
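// cubeRoot computes an integer cube root of n with Newton's method, iterating x' = (2x + n/x^2)/3 until successive estimates converge.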
func cubeRoot(n *big.Int) *big.Int {
xN := utils.GetBigInt(1)
diff := utils.GetBigInt(5)
approx := utils.GetBigInt(0)
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
for diff.CmpAbs(approx) == 1 {
t := new(big.Int)
t = t.Div(n, t.Mul(xN, xN))
xN1 := new(big.Int)
xN1 = xN1.Div(xN1.Add(xN1.Mul(two, xN), t), three)
diff = diff.Sub(xN, xN1)
xN = xN1.Set(xN1)
}
return xN
}
func | () error {
e, d, n, err := ciphers.RSAKeygen(1024)
if err != nil {
return err
}
plaintext := []byte("hi mom")
signature, err := ciphers.PKCS15Sign(plaintext, d, n)
if err != nil {
return err
}
fmt.Printf("Valid Signature: %s\n", signature)
verified := ciphers.PKCS15Verify(plaintext, signature, e, n)
fmt.Printf("Verified: %t\n", verified)
hash, err := hex.DecodeString(hashes.SHA1(plaintext))
if err != nil {
return err
}
padding := utils.MakeRepeatChar('\xff', 10)
padded := append([]byte("\x00\x01"), padding...)
padded = append(padded, '\x00')
padded = append(padded, hash...)
padded = append(padded, utils.MakeRepeatChar('\x00', 95)...)
x := new(big.Int).SetBytes(padded)
y := cubeRoot(x)
y = y.Add(y, utils.GetBigInt(1)) // overestimation > underestimation
forgery := hex.EncodeToString(y.Bytes())
fmt.Printf("Forged Signature: %s\n", forgery)
verified = ciphers.PKCS15Verify(plaintext, forgery, e, n)
fmt.Printf("Verified: %t\n", verified)
return nil
}
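// recoverKey derives the DSA private key x = (s*k - H(m)) * r^-1 mod q once the per-message nonce k is known.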
func recoverKey(k, H, r, s, q *big.Int) *big.Int {
x := new(big.Int)
r1 := new(big.Int).ModInverse(r, q)
x = x.Mod(x.Mul(x.Sub(x.Mul(s, k), H), r1), q)
return x
}
func problemThree() error {
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
m := []byte("I'm a string")
fmt.Printf("DSA string: %q\n", m)
signature, err := ciphers.DSASign(m, x, params)
if err != nil {
return err
}
fmt.Printf("DSA signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(m, signature, y, params)
if err != nil {
return err
}
fmt.Printf("Verified: %v\n\n", verified)
weakR, _ := new(big.Int).SetString("548099063082341131477253921760299949438196259240", 10)
weakS, _ := new(big.Int).SetString("857042759984254168557880549501802188789837994940", 10)
message := []byte("For those that envy a MC it can be hazardous to your health\nSo be friendly, a matter of life and death, just like a etch-a-sketch\n")
hash, err := utils.HexToBigint(hashes.SHA1(message))
if err != nil {
return err
}
k := new(big.Int)
for i := 1; i <= 65536; i++ {
k = k.SetInt64(int64(i))
r := new(big.Int)
r = r.Mod(r.Exp(params.G, k, params.P), params.Q)
if r.Cmp(weakR) == 0 {
break
}
}
privateKey := recoverKey(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := []string{}
f, err := os.Open(dataDir + input)
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
str := scanner.Text()
subs := strings.SplitN(str, ": ", 2)
if subs[0] == "m" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 16)
ms = append(ms, n)
} else if subs[0] == "r" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
rs = append(rs, n)
} else if subs[0] == "s" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
ss = append(ss, n)
} else {
msgs = append(msgs, subs[1])
}
}
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
msgCount := len(ms)
k := new(big.Int)
found := false
for i := 0; i < msgCount; i++ {
for j := i; j < msgCount; j++ {
if i == j {
continue
}
if rs[i].Cmp(rs[j]) == 0 {
num := new(big.Int).Sub(ms[i], ms[j])
den := new(big.Int)
den = den.ModInverse(den.Sub(ss[i], ss[j]), params.Q)
k = k.Mod(k.Mul(num, den), params.Q)
hash, err := utils.HexToBigint(hashes.SHA1([]byte(msgs[i])))
if err != nil {
return err
}
privateKey := recoverKey(k, hash, rs[i], ss[i], params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "ca8f6f7c66fa362d40760d135b763eb8527d3d52" {
fmt.Printf("Found key: %v\n", privateKey)
found = true
break
}
}
}
if found {
break
}
}
return nil
}
func problemFive() error {
hello := []byte("Hello, world")
goodbye := []byte("Goodbye, world")
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
params.G = utils.GetBigInt(0)
signature, err := ciphers.DSASign(hello, x, params)
if err != nil {
return err
}
fmt.Println("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
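// rsaMul multiplies the plaintext hidden inside ciphertext c by 'multiplier' without decrypting: it returns hex(c * multiplier^e mod n), using RSA's multiplicative homomorphism.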
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
func problemSix() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage2(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
bits := server.N.BitLen()
lowerBound := utils.GetBigInt(0)
upperBound := new(big.Int).Set(server.N)
multiplier := utils.GetBigInt(2)
two := utils.GetBigInt(2)
printInline(upperBound.Bytes())
for i := 0; i < bits; i++ {
even, err := server.CheckIsEven(rsaMul(c, multiplier, server))
if err != nil {
return err
}
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
type Interval struct {
Lower *big.Int
Upper *big.Int
B *big.Int
TwoB *big.Int
ThreeB *big.Int
ThreeBSub1 *big.Int
}
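// initialInterval builds Bleichenbacher's starting interval M0 = [2B, 3B-1], where B = 2^(8(k-2)) for a k-byte modulus.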
func initialInterval(n *big.Int) *Interval {
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
B := new(big.Int).Exp(two, utils.GetBigInt(n.BitLen()-16), nil)
TwoB := new(big.Int).Mul(two, B)
ThreeB := new(big.Int).Mul(three, B)
ThreeBSub1 := new(big.Int).Sub(ThreeB, utils.GetBigInt(1))
return &Interval{TwoB, ThreeBSub1, B, TwoB, ThreeB, ThreeBSub1}
}
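// searchS (steps 2a/2b of Bleichenbacher's attack) finds the smallest s >= s0 such that c*s^e mod n is PKCS#1 v1.5 conforming.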
func searchS(s0, c *big.Int, server *secrets.RSAServer) (*big.Int, error) {
s1 := new(big.Int).Set(s0)
one := utils.GetBigInt(1)
var err error
valid := false
for !valid {
c1 := rsaMul(c, s1, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, err
}
if valid {
return s1, nil
}
s1 = s1.Add(s1, one)
}
return s1, nil
}
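// searchRS (step 2c) handles the single-interval case [a, b]: it scans r >= 2*(b*s0 - 2B)/n and s in [(2B+r*n)/b, (3B+r*n)/a) for the next conforming multiplier.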
func searchRS(s0, c *big.Int, interval *Interval, server *secrets.RSAServer) (*big.Int, *big.Int, error) {
one := utils.GetBigInt(1)
r := new(big.Int)
r = ceilDiv(r.Mul(utils.GetBigInt(2), r.Sub(r.Mul(interval.Upper, s0), interval.TwoB)), server.N)
s := new(big.Int)
minS := new(big.Int)
maxS := new(big.Int)
var err error
valid := false
for r.Cmp(server.N) == -1 {
rn := new(big.Int).Mul(r, server.N)
minS = minS.Div(minS.Add(interval.TwoB, rn), interval.Upper)
maxS = maxS.Div(maxS.Add(interval.ThreeB, rn), interval.Lower)
for s.Set(minS); s.Cmp(maxS) == -1; s.Add(s, one) {
c1 := rsaMul(c, s, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, nil, err
}
if valid {
return r, s, nil
}
}
r = r.Add(r, one)
}
return nil, nil, errors.New("could not find parameters")
}
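// nextInterval (step 3) intersects the interval [a, b] with [ceil((2B+r*n)/s), floor((3B-1+r*n)/s)].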
func nextInterval(interval *Interval, s, r, n *big.Int) *Interval {
rn := new(big.Int).Mul(r, n)
a1 := new(big.Int)
a1 = ceilDiv(a1.Add(interval.TwoB, rn), s)
b1 := new(big.Int)
b1 = b1.Div(b1.Add(interval.ThreeBSub1, rn), s)
var newInt Interval
newInt = *interval
if interval.Lower.Cmp(a1) == -1 {
newInt.Lower = a1
}
if interval.Upper.Cmp(b1) == 1 {
newInt.Upper = b1
}
return &newInt
}
func ceilDiv(x, y *big.Int) *big.Int {
mod := new(big.Int)
zero := utils.GetBigInt(0)
z, mod := new(big.Int).DivMod(x, y, mod)
if mod.Cmp(zero) != 0 {
z = z.Add(z, utils.GetBigInt(1))
}
return z
}
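// allIntervals (step 3) rebuilds the set of candidate intervals for the latest conforming s, trying every feasible r for each interval.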
func allIntervals(currentSet []*Interval, s, n *big.Int) []*Interval {
one := utils.GetBigInt(1)
newSet := []*Interval{}
for i := 0; i < len(currentSet); i++ {
bounds := currentSet[i]
minBound := new(big.Int)
minBound = ceilDiv(minBound.Sub(minBound.Mul(bounds.Lower, s), bounds.ThreeBSub1), n)
maxBound := new(big.Int)
maxBound = maxBound.Div(maxBound.Sub(maxBound.Mul(bounds.Upper, s), bounds.TwoB), n)
r := minBound
for r.Cmp(maxBound) != 1 {
next := nextInterval(bounds, s, r, n)
newSet = append(newSet, next)
r = r.Add(r, one)
}
}
return newSet
}
func printIntervals(set []*Interval) {
printInline(set[0].Lower.Bytes())
}
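// bleichbacherAttack runs Bleichenbacher's 1998 PKCS#1 v1.5 padding-oracle attack: it repeatedly finds conforming multipliers s and narrows the candidate intervals until a single value, the padded plaintext, remains.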
func bleichbacherAttack(c *big.Int, server *secrets.RSAServer) ([]byte, error) {
one := utils.GetBigInt(1)
bounds := initialInterval(server.N)
minS := new(big.Int)
minS = minS.Div(server.N, bounds.ThreeB)
s, err := searchS(minS, c, server)
if err != nil {
return nil, err
}
intervalSet := []*Interval{bounds}
for i := 0; i < 5000; i++ {
intervalSet = allIntervals(intervalSet, s, server.N)
for j := 0; j < len(intervalSet); j++ {
if intervalSet[j].Lower.Cmp(intervalSet[j].Upper) == 0 {
return intervalSet[j].Lower.Bytes(), nil
}
}
printIntervals(intervalSet)
if len(intervalSet) > 1 {
minS = minS.Add(s, one)
s, err = searchS(minS, c, server)
if err != nil {
return nil, err
}
} else {
_, s, err = searchRS(s, c, bounds, server)
if err != nil {
return nil, err
}
}
}
return nil, errors.New("Found nothing")
}
func removePadding(plaintext []byte) []byte {
var i int
for i = 2; i < len(plaintext); i++ {
if plaintext[i] == '\x00' {
break
}
}
return plaintext[i+1:]
}
func problemSeven() error {
server, err := secrets.NewRSAServer(256)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage3(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func problemEight() error {
server, err := secrets.NewRSAServer(768)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage4(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
fmt.Printf("Original: %v\n", ciphertext)
solution, err := bleichbacherAttack(c, server)
if err != nil {
return err
}
fmt.Printf("Found Message: %q \n", removePadding(solution))
return nil
}
func main() {
header := color.New(color.FgCyan, color.Bold)
header.Println("Problem 1: Unpadded Message Recovery Oracle")
err := problemOne()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 2: Bleichenbacher's e=3 RSA Attack")
err = problemTwo()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 3: DSA key recovery from nonce")
err = problemThree()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 4: DSA key recovery from repeated nonce")
err = problemFour("set6challenge44.txt")
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 5: DSA parameter tampering")
err = problemFive()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 6: RSA parity oracle")
err = problemSix()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 7: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 1)")
err = problemSeven()
if err != nil {
fmt.Println(err)
}
fmt.Println()
header.Println("Problem 8: Bleichenbacher's PKCS 1.5 Padding Oracle (Part 2)")
err = problemEight()
if err != nil {
fmt.Println(err)
}
fmt.Println()
}
| problemTwo | identifier_name |
OrganizationListDetail.ts | const imgHostname = 'https://cyxbsmobile.redrock.team/static/register/'
interface PosterStyle {
backgroundImage: string
}
interface Department {
name: string
posterStyle: PosterStyle
introduction: string
}
interface Organization {
name: string
departmentList: Array<Department>
}
interface OrganizationListDetail {
hongyan: Organization
bangongshi: Organization
zuzhibu: Organization
xuanchuanbu: Organization
xueshenghui: Organization
qingxie: Organization
dayituan: Organization
kelian: Organization
shelian: Organization
yanhui: Organization
huweidui: Organization
}
const organizationListDetail: OrganizationListDetail = {
hongyan: {
name: '红岩网校工作站',
departmentList: [
{
name: '产品策划及运营部',
posterStyle: {
backgroundImage: `url(${imgHostname}hongyan/hongyan-pm.png)`
},
introduction:
'Ta是用户需求的倾听者,也是提出解决方案的总设计师。活跃在各部门之间,是促进各部门沟通的桥梁。Ta是创意的缔造者,拥有独特严谨的产品思维。一份原型图、一篇PRD可以让程序员和设计师开启工作模式,也可以写出优秀的文案对活动和产品形成强有力的助推,Office、Axure、Xmind等软件是Ta们的好帮手。产品策划及运营部旨在培养出优秀的产品经理,产品运营者和新媒体运营师。 '
},
{
name: '视觉设计部',
posterStyle: {
backgroundImage: `url(${imgHostname}hongyan/hongyan-design.png)`
},
introduction:
'Ta是设计产品图形用户界面,跟踪产品视觉及体验效果的设计师。需要经常同设计软件打交道的Ta们,主要负责针对pc端、移动端、海报等界面的设计,团队协作设定产品整体界面视觉风格与创意规划,对设计成果进行定期评估和研究,进行视觉元素的优化设计。视觉设计部旨在设计出优秀的互联网产品,培养插画设计师、交互设计师、视觉设计师和UI设计师。 '
},
{
name: 'Web研发部',
posterStyle: {
backgroundImage: `url(${imgHostname}hongyan/hongyan-web.png)`
},
introduction:
'Ta的职责包括“重邮小帮手”微信公众号、移动端/PC端网站开发、助力掌上重邮APP、提供强大的后台驱动等。目前分为前端和后端方向,前端主要采用html、css、JavaScript等利器,针对PC端、移动端等不同交互领域采用不同的设计风格与技术,将视觉无限的创意以行行代码形式转化为交互良好的网站页面,并通过一系列优化手段使用户体验达到最佳。后端主要负责将产品细致入微的现实需求转化为清晰的数据模型与业务逻辑,采用PHP、Java、Golang等开发语言操作数据库并进行严格的分析处理,掌控数据传递处理路上的每个流程,从而为网校提供最为坚实的数据后盾。'
},
{
name: '移动开发部',
posterStyle: {
backgroundImage: `url(${imgHostname}hongyan/hongyan-mobile.png)`
},
introduction:
'Ta主要负责移动端产品的开发,以 Java & Kotlin,Objective-C & Swift为主要开发语言,涵盖Android、iOS两个平台。在这里你不但可以学到App开发,还能认识一大群志同道合的朋友。如果你对App开发感兴趣,对App制作有欲望,移动开发部将是你的秀场。来吧,如果你曾抱怨被大神虐哭,现在一个成为大神的机会等着你!'
},
{
name: '运维安全部',
posterStyle: {
backgroundImage: `url(${imgHostname}hongyan/hongyan-sre.png)`
},
introduction:
'Ta既是红岩网校运维安全部(内部称之为 网校 SRE ( Site Reliability Engineering )),也是重庆邮电大学 LUG(Linux User Group) ,主要负责维护网校的服务器稳定和安全,同时也负责新项目的部署、环境安全配置和性能优化,还负责了重庆邮电大学的公益项目:重庆邮电大学开源镜像站。Ta们使用 Python语言来开发好玩有趣的项目,在这里你可以利用服务器资源来搭建自己想做的任何项目。你要对技术执着、对高性能孜孜不倦,“如果不能优雅的解决这个问题,那我们就有了两个问题”。'
}
]
},
bangongshi: {
name: '团委办公室',
departmentList: [
{
name: '团委办公室',
posterStyle: {
backgroundImage: `url(${imgHostname}sanbushi/sanbushi-bangong.png)`
},
introduction:
'重庆邮电大学团委办公室(以下简称办公室)是共青团重庆邮电大学委员会直属机构之一。是学校团学工作的枢纽与核心,在团委与各校级学生组织之间起到桥梁与纽带的作用,是所有学生组织中与老师联系最密切的部门,是团委老师的得力助手。办公室负责团委工作的上传下达——向上通过制作团委工作简报《重邮共青团》等,向团中央、团市委汇报学校团学工作动态;向下沟通联系各校级学生组织,传达团委相关工作要求,并针对学生组织相关工作予以监督。参与学生组织各类活动的筹备工作,负责学生活动场地的管理工作,监督并完善团建在线网站的建设。同时,办公室还协助完成学校大型活动的会务,秘书工作,协助完成大学生志愿者暑期“三下乡”社会实践活动,负责各项文件,奖状资料的管理存档工作。'
},
]
},
zuzhibu: {
name: '团委组织部',
departmentList: [
{
name: '团委组织部',
posterStyle: {
backgroundImage: `url(${imgHostname}sanbushi/sanbushi-zuzhi.png)`
},
introduction:
'重庆邮电大学团委组织部是校团委核心直属部室。组织部以“严谨 高效 励志 笃行”作为部训,协调统筹各学院团总支及基层团支部开展工作。是负责全校共青团的基层组织建设工作、推优入党工作、五四评优表彰工作、开展青年马克思主义培养工程、指导主题团日活动的开展和团支部“活力提升”工程的实施以及开展班级团支部和班委会一体化运行机制改革等工作的职能部门。'
},
]
},
xuanchuanbu: {
name: '团委宣传部',
departmentList: [
{
name: '团委宣传部',
posterStyle: {
backgroundImage: `url(${imgHostname}sanbushi/sanbushi-xuanchuan.png)`
},
introduction:
'校团委宣传部是我校团委的重要部门,担负着向学校、校团委各学生组织传达信息的重要任务,是校团委各部门正常工作的基本保证。宣传部是负责全校工作的宣传、报道,传达学校和团委的工作指示,会议通知,对广大团员青年进行思想政治教育、素质教育的宣传工作;负责通过网络、海报、橱窗、等形式开展政治思想教育工作。宣传部以丰富校园文化生活,活跃同学们的第二课堂,丰富广大同学的课余生活为己任,带领各团总支宣传部开展工作。'
},
]
},
xueshenghui: {
name: '校学生会',
departmentList: [
{
name: '综合部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-zonghe.png)`
},
introduction:
'综合部以其“承上启下”的重要作用在重庆邮电大学校学生会中拥有一席之地。综合部作为学生会的“中心枢纽”,默默地在学生会中付出,清除同学们的后顾之忧。 在过去的时间里,综合部协助各个部门举办了众多活动,与各个学院学院、各个部门有多方面的交流。除了和校内的部门保持联系,外联也是综合部重要的一部分,综合部积极同重庆市各高校保持联络。在综合部可以提高自己的办公和沟通能力。'
},
{
name: '学习部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-xuexi.png)`
},
introduction:
'学习部是以学校学风建设为中心开展一系列活动,包括“信科杯辩论赛"“新生杯辩论赛”"一站到底”等。学习部让你学到的从来都不是如何提高成绩,而是综合能力的提升和不同层面的沟通能力,每一个活动的前期准备,策划,现场模拟及掌控都是由学习部全权负责,怎样组织一场类似辩论赛的正式活动,这样的活动应该注意什么。“人活一世,年少一去不复返,当然要活出少年人的潇洒肆意。”欢迎萌新们加入学习部肆意的绽放你们的才华!'
},
{
name: '宣传部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-xuanchuan.png)`
},
introduction:
'是学生会工作的一个窗口,肩负着对内宣传学校、学生会动态,对外展现我校风采的职责。我部秉承着“全心全意为学生服务”的原则,切实做好各类宣传教育工作,把握正确的舆论导向,是广大学生与学生会的桥梁和纽带。我部工作主要为新媒体宣传、平面设计和视频制作三大块,线上和线下及时宣传,作用于学生会的各类活动的全过程。宣传部是学生会最具思想、更练能力、更需创新的部门之一。'
},
{
name: '权益提案部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-quanyi.png)`
},
introduction:
'“解决同学们‘食、住、行’的一切问题”是我们不变的工作重心。我们时刻为解决同学们疑难问题而准备着,除了进行包括“问题收集”、“问题解决”和“进度跟进”三大方面的日常工作,我们还将针对代表性问题不定期开展如“权小益见面会”、“提案大赛”等线上线下相关活动,力求听到每位重邮学子的声音的同时,也希望促进同学们表达沟通的能力。当然我们会及时和学校有关部门进行有效沟通对接,使每个同学合理的诉求得到解决,从而有效鼓励同学们为自己维权为自己发声。'
},
{
name: '生活服务部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-fuwu.png)`
},
introduction:
'承担同学和食堂沟通的桥梁工作,为学生搭建与学校食堂、学校后勤部门以及学校宿舍管理等部门沟通的桥梁。全校同学对于校园生活存在各种不满都可以向本部门反馈,我们将把收集到的建议和意见合理化过后及时反馈给有关后勤部门,进而建立健全反馈机制,优化校园后勤工作开展。'
},
{
name: '文艺部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-wenyi.png)`
},
introduction:
'重庆邮电大学学生会文艺部是一个开展各项校园文艺活动的职能部门,活跃校园文化气氛,丰富广大同学业余文化生活。主要举办“英悦重邮”、“草坪音乐会”、“校园十大歌手”、“诗悦流芳”、“室内音乐会”等校级比赛类节目、欣赏类节目,并会协助其他部门或者学院一起进行创新类节目的策划、承办等。'
},
{
name: '体育部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-tiyu.png)`
},
introduction:
'体育部是学校开展体育活动的主要组织部门。体育部多次成功举办校园大型户外体育活动,如极速重邮、力拔山河等。体育部有效的组织的体育活动,给同学提供更多的课余活动时间,丰富了同学们的课余活动,提高了同学参加体育锻炼的积极性。'
},
{
name: '女生部',
posterStyle: {
backgroundImage: `url(${imgHostname}xueshenghui/xueshenghui-nvsheng.png)`
},
introduction:
'女生部作为我校特色部门是专门为女生设立的服务部门,是一个以女生活动为主的特殊部门。女生部结合女同学的生活方式和其他方面的特点,针对女生开展有益的活动,配合学校有关部门解决女同学在学习、生活中的困难,维护女同学的正当权益。女生部更注重着力于学生整体形象塑造,注重于展现新一代大学生的独特的风采和精神面貌。'
}
]
},
qingxie: {
name: '青年志愿者协会',
departmentList: [
{
name: '综合管理部',
posterStyle: {
backgroundImage: `url(${imgHostname}qingxie/qingxie-zonghe.png)`
},
introduction:
'综合管理部负责青年志愿者协会内部事务的协调与管理,对学院的志愿活动的对接与考核,对青年志愿者协会内部物资进行管理,对学院志愿活动资料的收集整理及评分,以及负责志愿活动时长的管理和添加,在整个部门中起着纽带作用。在青春中奋斗,在最好的时光遇见综合,相约开启志愿之行!'
},
{
name: '服务总队',
posterStyle: {
backgroundImage: `url(${imgHostname}qingxie/qingxie-fuwu.png)`
},
introduction:
'天天节电活动,我们行走在每一栋教学楼,关闭电器,为环保助力。天天咨询活动中,是我们为退休教师解答疑惑的身影。夜晚的操场,天天护跑的志愿者保驾护航,为夜跑人提供存包服务。九月,你们迎来了大学,我们遇见了怀揣梦想朝气蓬勃的你,希望你梦想的起点,从青年志愿者协会服务总队开始。'
},
{
name: '实践服务部',
posterStyle: {
backgroundImage: `url(${imgHostname}qingxie/qingxie-shijian.png)`
},
introduction:
'实践服务部主要负责校外志愿服务活动,社区市民学校建设,日常开展“四点半课堂”以及“周末课堂”等志愿活动。并负责规范管理校内各学院志愿活动,以及大学生暑期“三下乡”社会实践活动相关工作。以志愿之心,以志愿之行,你我携手,播下一颗爱的种子。实践服务部期待与你们的志愿之约!'
},
{
name: '宣传推广部',
posterStyle: {
backgroundImage: `url(${imgHostname}qingxie/qingxie-xuanchuan.png)`
},
introduction:
'宣传推广部负责官方QQ的日常运营以及微信推送,各项志愿活动的宣传,校内外志愿活动的跟踪纪录;同时对志愿服务品牌项目进行推广,通过拍摄活动照片、推送优质文案以及录制视频直播等形式,使更多的人了解青协的日常,了解志愿活动。不忘初心方得始终,让志愿与你我同行。宣传推广部期待与你一起记录美好的志愿时光!'
}
]
},
dayituan: {
name: '大学生艺术团',
departmentList: [
{
name: '综合部',
posterStyle: {
backgroundImage: `url(${imgHostname}dayituan/dayituan-zonghe.png)`
},
introduction:
'综合部是整个大艺团的心脏和大脑,负责整个艺术团后勤以及技术设备支持。目前,综合部内部分为工作组,负责校内文艺晚会和颁奖典礼的后勤,提供舞美、音响灯光调控和后台支持,同时衔接管理着各大团以及团内的日常事务。宣传组,管理大艺团官方公众号的运营,负责与艺术团相关活动的照片记录,新闻稿整理。校礼仪队,大艺团管理的礼仪队是重庆邮电大学唯一一支校级礼仪队,负责校内各大颁奖典礼的礼仪工作。'
},
{
name: '管乐团',
posterStyle: {
backgroundImage: `url(${imgHostname}dayituan/dayituan-guanyue.png)`
},
introduction:
'管乐团是大艺团的一个分团,历史悠久,承担着我校各项大型演出任务,组织编排各项文艺节目,为有文艺特长的同学搭建良好的交流和展示平台,在大大小小的比赛舞台上获奖无数。虽然人数不是最多的,但是我们的组成非常细致,木管、铜管、打击乐还有弦乐,再往下就是各种乐器声部。管乐团不仅是学乐器的地方,还能享受大排练那愉悦的气氛。'
},
{
name: '民乐团',
posterStyle: {
backgroundImage: `url(${imgHostname}dayituan/dayituan-minyue.png)`
},
introduction:
'重邮艺术血脉里的民族血统,磅礴壮丽的乐曲演出,细致严谨的排练风格,温暖和熙的相处氛围。我们带来优秀的演绎作品,传播民乐的魅力。不论是校内还是校外的大型活动,我们都投身参与,斩获荣誉。对待音乐,我们是认真而热忱的。我们喜爱志同道合的朋友,携手同行,相互进步。大音,只为让每一个人的神经递质疯狂释放。'
},
{
name: '舞蹈团',
posterStyle: {
backgroundImage: `url(${imgHostname}dayituan/dayituan-wudao.png)`
},
introduction:
'大学生艺术团舞蹈团是以民族舞、古典舞、现代舞为主的为各大舞台筹划演出和准备比赛的校级组织。平时会组织基本功、舞感训练和舞蹈组合的排练,并且有专门的舞蹈老师带队。学校的大型演出都有舞蹈团的身影!这里无关身高,无关技巧,只要有一颗热爱舞蹈的心就足够。'
},
{
name: '合唱团',
posterStyle: {
backgroundImage: `url(${imgHostname}dayituan/dayituan-hechang.png)`
},
introduction:
'重邮大艺团合唱团是隶属校团委的艺术团体,自成立以来,为广大爱好唱歌的同学提供一个提高合唱水平和互相交流的空间, 提升团员的整体文艺素质。作为校艺术团的主干力量,自成立以来就承担起学校各方面的合唱演出节目。合唱团长期坚持科学的歌唱发声训练和规范化的排练,并聘请专业老师进行指导。一直致力于音乐的推广,丰富学生的精神生活,并将加强文化交流、增进友谊情感作为自己的宗旨。合唱团热情欢迎每一个热爱歌唱的你。'
},
{
name: '话剧团',
posterStyle: {
backgroundImage: `url(${imgHostname}dayituan/dayituan-huaju.png)`
},
introduction:
'话剧团是一个以原创剧本为主的表演舞台,为各大舞台筹划演出以及参加各项比赛的校级组织。在学校的各大舞台以及重庆市大艺展总会有话剧团的身影。所以如果你有一颗热爱表演的心,我们就为你搭建舞台,成就你的梦想;如果你渴望释放自我,寻找自己的花样年华,我们就给你无限可能,演绎你自己的缤纷色彩。在这里既能满足你学业上的需求,又收获无限的快乐。So~优秀的你可别错过这一站哟!'
}
]
},
kelian: {
name: '学生科技联合会',
departmentList: [
{
name: '综合部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-zonghe.png)`
},
introduction:
'综合部是学生科联的枢纽中心。它是连接团委、主席团和科联各个部门、干事之间的纽带,起到上传下达的作用,并以办公室的日常管理为工作重点。其主要负责制定、完善、推广科联制度;负责科联对外联络和洽谈;负责科联各部门内部协调;负责组织科联干部干事成立大会、素质拓展、科联交享悦、科联杯、干部干事培训和考核、人才互荐交流、总结晚会等活动;负责与各学院的学生科创组织保持紧密联系,开展交流活动;负责与兄弟高校的学生科创组织进行交流合作。'
},
{
name: '科创竞赛部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-jingsai.png)`
},
introduction:
'科创竞赛部是学生科技联合会的竞技中心。以“科创点亮未来,竞赛成就梦想”为理念,主要负责开展和推广校内外科技竞赛活动,如“科普先锋秀”、“无线电猎狐大赛”等;组织“挑战杯”(大挑)大学生课外学术科技作品竞赛、“创青春”(小挑)全国大学生创业大赛的申报和立项工作。致力于为全校提供一个校内外公平竞技的平台,营造良好的校园竞技氛围。'
},
{
name: '项目管理部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-guanli.png)`
},
introduction:
'项目管理部是学生科联的科创中心,主要负责学生科联的创新、创业工作。负责协调开展“学生科技节”等系列大型科技活动,包括组织开展“学生课外学术科技作品竞赛”、“创新创业训练营”、“学生创新创业成果展”等系列活动。同时致力于“创新高端论坛”校内外创新创业沙龙活动、运用网络媒体发布校内外科创赛事资讯等,构建学校创新创业项目交流以及推广的平台,营造浓厚的科技创新创业氛围。'
},
{
name: '科技人文部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-renwen.png)`
},
introduction:
'科技人文部作为学生科技联合会的文化活动中心,秉承着“科技点缀生活,人文融入梦想”的理念,以“文峰青年大讲堂”、“重邮青年说”、“学长演播厅”为主打品牌活动。文峰青年大讲堂诚邀知名专家学者和文化名人,旨在浓厚校园科技文化氛围的同时,强化人文环境,打造属于重邮的专属讲堂。重邮青年说旨在寻找和培养一批敢于发声,说出自己对生活的感悟的重邮人,传播年轻正能量。学长演播厅邀请优秀学长学姐,为新生答疑解惑,力求将最新最热最有用的大学资讯和成功经验分享给重邮学子。'
},
{
name: '信息部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-xinxi.png)`
},
introduction:
'信息部是学生科联的技术中心。其主要负责科技创新实践活动的培训与开展。Web组主要负责Html5的开发及Web前端的基础培训,静态网页与动态网页的制作;UI组负责对网页整体界面美观、人机交互、操作逻辑的设计;运营组主要负责利用PowerPoint和Premiere等软件来进行产品运营以及宣传。致力于“培养精英团队”,打造科联信息化平台,丰富科联的创新创意活动。'
},
{
name: '媒体运营部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-meiti.png)`
},
introduction:
'媒体运营部是学生科联的创意中心。其主要负责学生科联创意设计及校内外宣传工作。设计海报、条漫及展板,活动现场布置等;制作活动视频、微电影和动画以及活动现场摄影摄像及新闻稿的撰写。同时也负责学生科联线上的运营工作,管理科联公众号的推广,通过海报、视频、网络等形式在校内外宣传科联活动,打响科联品牌,展示科联成果。'
}
]
},
shelian: {
name: '学生社团联合会',
departmentList: [
{
name: '综合部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-zonghe.png)`
},
introduction:
'作为我校学生社团联合会的管家部门,是一个沟通组织内部、联系组织外部的桥梁性部门。综合部,对内负责社联物资管理、各部门考勤考核以及财务报账问题,解决社联体系里的琐事,组织内部四个部门团建,协助监督其他部门完成相应任务。对外,掌握我校社联对外高校的交流与联系,为给树立我校社联树立一个良好的形象做出不少努力。部门更注重的是带动学校各个社团的发展,时刻监督并管理着社团的充分运行。'
},
{
name: '社团活动部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-huodong.png)`
},
introduction:
'社团活动部作为学生社团联合会直属四部门之一是一个以社团活动为主的特殊部门。社团活动部通过对各个社团不同的特点,针对不同类别的社团策划、组织、开展有助于社团发展的活动,社团活动部更侧重于组织开展新颖且独具特色的社团活动,同时承办各部门共同举办校级大型活动,丰富校园文化。'
},
{
name: '社团服务部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-fuwu.png)`
},
introduction:
'社团服务部服务部作为社联必不可少的四大部门之一,有着社团小帮手的称号。主要的工作职责是管理社团,是社联连接社团、社团部的重要桥梁,组织着社团的成立、招新、换届以及社团的评奖评优等工作。社团服务部相比其他三个更注重的是带动学校各个社团的发展,时刻监督并管理着社团的充分运行。'
},
{
name: '宣传部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-xuanchuan.png)`
},
introduction:
'宣传部作为学生社团联合会下的直属部门,主要负责相关活动前期的宣传推广工作,设计宣传推送、活动海报、内容视频等,以使后期活动能够顺利开展,并达到预期效果。同时负责审批社团的活动海报、视频等相关文化制品。并参与运营和搭建社联新媒体的学生社团宣传平台,更新宣传方法,加大宣传力度,拓宽宣传受众面。致力于使更多的同学了解并参与各个社团以及其组织的相关活动,丰富同学们的课余生活。'
},
]
},
yanhui: {
name: '研究生会',
departmentList: [
{
name: '科技实践部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-keji.jpg)`
},
introduction:
'我们将致力于创办与科技相关的赛事活动,如主办全校研究生英语风采大赛、协办各项科技竞赛、参管研究生辩论队。为我校研究生打开有创新、有思想、有趣味的新世界大门。'
},
{
name: '信息宣传部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-xinxuan.jpg)`
},
introduction:
'负责公众号运营,研会线上、线下宣传产品设计,宣传片、视频等多媒体作品制作以及其他宣传工作,对研会各个活动进行品牌包装和技术支持。让我们告别枯燥的海报制作,轻松掌握新媒体运营技巧。在信宣,技术不是关键,脑洞征服世界。'
},
{
name: '外联部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-wailian.jpg)`
},
introduction:
'主管企业俱乐部,负责全国各兄弟院校之间、本校研究生与企业等单位之间、校内各组织间的沟通与交流。各高校知名专家学者,重邮知名校友校企,社会知名人士都是你沟通的对象。'
},
{
name: '自律权益部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-zilv.jpg)`
},
introduction:
'这里是学生实现自我管理的重要平台,我们要配合学校管理日常纪律、维护公共秩序,还要协助学生宿舍安全卫生检查工作。我们的目标是:为全校研究生营造安全、文明、舒适的学习和生活环境。'
},
{
name: '人力资源部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-renli.jpg)`
},
introduction:
'掌握研会命脉,聆听各部心声。负责研会的人事管理、活动记录和物资进出与调度。长期的工作中以严谨高效的作风,根据研会章程,制定活动规范与考核制度。主办新老生交流会及素质拓展等活动,加强研会内部交流融合。人力资源部,团结研会力量,做实力HR !'
},
{
name: '文体部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-wenti.jpg)`
},
| name: '学术交流部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-xueshu.jpg)`
},
introduction:
'以举办特色鲜明的学术讲座和论坛活动为主,拓宽研究生的学术视野,增强同学们的学术研究氛围。同时根据在校研究生的需求,开展切实有效的不同主题交流活动,构建各领域专家与同学们面对面交流的平台。学术交流部,看似高冷却超接地气的部门!'
}
]
},
huweidui: {
name: '国旗护卫队',
departmentList: [
{
name: '国旗护卫队',
posterStyle: {
backgroundImage: `url(${imgHostname}huweidui/huweidui.png)`
},
introduction:
'重庆邮电大学国旗护卫队成立于2005年,原隶属于校团委,后调归学校保卫处人民武装部下。自成立至今,出色的完成了重庆人民广场“五一”升国旗任务、重庆市第五次少代会升旗仪式、校运会、八院联谊、九院联谊、特殊纪念日及每周一的升旗等任务。队内每一位队员都经过严格的训练,在校武装部和各届队员的共同努力下,已经形成具有重庆邮电大学特色的升旗模式。重庆邮电大学国旗护卫队受到各界广泛好评,为新时代爱国主义教育工作做出了巨大贡献。'
},
]
},
}
export default organizationListDetail | introduction:
'研究生校园文化生活的缔造者和领跑人。于文,主办迎新晚会等大型活动,丰富研究生的课余生活,协助各分研会举办各类文艺活动,营造活跃向上的氛围。于体,参与组建、管理研究生各类球队,积极参加各类校级比赛,如运动会、“青春杯”篮球、足球赛、公园排球赛、校园马拉松等,宣传体育育人理念,提高研究生的综合素质。'
},
{
| random_line_split |
dominogame.go | package main
import (
"fmt"
"math/rand"
"strconv"
)
type dominoGame struct {
players []player
pieces []dominoPiece
grid dominoGrid
turnOrder []int
}
type player struct {
playerNumber int
ownedPieces []dominoPiece
}
type dominoPiece struct {
top int
bot int
}
type dominoGrid struct {
grid [][]string
}
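// The board is a dense grid of strings: "X" marks an empty square and a digit
// marks one half of a placed domino. Play starts from the 3x3 all-"X" grid
// built in generateNewGame, and the grid is meant to grow by a row or column
// whenever a piece reaches an edge.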
func main() {
fmt.Println("-------- Domino Game --------")
//creating the game and players
game := generateNewGame(pickPlayers())
//start game
fmt.Println("---- Game Start! ----")
fmt.Println("game.turnOrder:", game.turnOrder)
game.playGame()
fmt.Println("---- Game End! ----")
printDebug(game)
}
func printGrid(grid dominoGrid) {
for _, v := range grid.grid {
fmt.Println(v)
}
}
func pickPlayers() int {
var numPlayers int
for {
fmt.Printf("\nHow many players?\n")
fmt.Scan(&numPlayers)
if numPlayers >= 2 && numPlayers <= 4 {
return numPlayers
}
fmt.Println("Invalid number of players. Please pick 2 to 4 players.")
}
}
func generateNewGame(numPlayers int) dominoGame {
var game dominoGame
//build the player objects
for i := 1; i < numPlayers+1; i++ {
game.players = append(game.players, player{playerNumber: i})
}
//build the domino pieces
k := 0
for i := 0; i < 7; i++ {
for j := k; j < 7; j++ {
game.pieces = append(game.pieces, dominoPiece{top: i, bot: j})
}
k++
}
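// The nested loops above enumerate every unordered pair exactly once:
// (0,0)..(0,6), then (1,1)..(1,6), ..., finally (6,6): 7+6+5+4+3+2+1 = 28
// pieces, i.e. a standard double-six set.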
//build a 3x3 grid to start, this will expand as pieces get placed
game.grid.grid = [][]string{{"X", "X", "X"}, {"X", "X", "X"}, {"X", "X", "X"}}
//shuffling the pieces
game, firstMove := assignPieces(game)
game.turnOrder = generateTurnOrder(firstMove, game.players)
return game
}
func assignPieces(gameRaw dominoGame) (dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces)) // Intn over the full length; len-1 would panic when only one piece is left
player.ownedPieces = append(player.ownedPieces, game.pieces[r])
firstTurn, highestDouble = firstMove(game.pieces[r], highestDouble, firstTurn, player.playerNumber)
game.pieces = remove(game.pieces, r)
}
game.players[k] = player
} | }
}
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
func remove(s []dominoPiece, i int) []dominoPiece {
s[i] = s[len(s)-1]
return s[:len(s)-1]
}
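// remove is a constant-time swap-remove: the element at index i is overwritten
// with the last element and the slice is shortened by one, so piece order is
// not preserved. Sketch: remove([{0 0} {1 1} {2 2}], 0) yields [{2 2} {1 1}].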
func generateTurnOrder(firstMove int, players []player) (turnOrder []int) {
turnOrder = append(turnOrder, firstMove)
for _, player := range players {
if player.playerNumber != firstMove {
turnOrder = append(turnOrder, player.playerNumber)
}
}
return
}
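// Turn-order sketch: with players 1..3 and firstMove == 2 the result is
// [2 1 3]: the holder of the highest double leads and the rest keep their
// original numbering.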
func (game *dominoGame) playGame() {
firstTurn := true
var pickedPiece int
var newOwnedPieces []dominoPiece
var newGrid dominoGrid
for {
//players place their pieces down in specific turns
for _, playerNum := range game.turnOrder {
printGrid(game.grid)
if firstTurn {
//have to place the highest doubles piece for the first turn
highestDouble := getHighestDouble(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
break
}
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) {
fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
if x == len(newGrid.grid[0]) {
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) {
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
case 3:
if newGrid.grid[y][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y][x-1] = end2
if y == len(newGrid.grid)-1 {
expandGrid("bot", newGrid)
}
break
case 4:
if newGrid.grid[y-1][x-2] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x-2] = end2
if x == 2 {
expandGrid("left", newGrid)
}
break
default:
fmt.Println("Invalid orientation. Select one of the numbers for each side.")
continue
}
break
}
break
}
//overwrite with the new grid
grid = newGrid
printGrid(grid)
//remove piece from owned player pieces
for k, playerPiece := range playerPieces {
if playerPiece == piece {
playerPieces = remove(playerPieces, k)
break
}
}
return playerPieces, grid
}
func checkPiece(piece dominoPiece, grid dominoGrid) bool {
viable := false
for y := 1; y <= len(grid.grid)-2; y++ {
for x := 1; x <= len(grid.grid[0])-2; x++ {
//check if it could be matched with any domino on the board
if grid.grid[y][x] == strconv.Itoa(piece.top) || grid.grid[y][x] == strconv.Itoa(piece.bot) {
//check if there is room to place
if grid.grid[y+1][x] == "X" || grid.grid[y-1][x] == "X" || grid.grid[y][x+1] == "X" || grid.grid[y][x-1] == "X" {
viable = true
}
}
}
}
return viable
}
func selectPieceEnd(piece dominoPiece) (end string) {
for {
fmt.Println("Piece ", piece, " selected. Select end: Top -", piece.top, " Bot -", piece.bot)
fmt.Scan(&end)
endInt, err := strconv.Atoi(end)
if err != nil {
fmt.Println("Invalid input. Type in a number.")
continue
}
if endInt != piece.top && endInt != piece.bot {
fmt.Println("Invalid end. Select ", piece.top, " or ", piece.bot, ".")
continue
}
break
}
return
}
func isSpaceAlreadyOccupied(newGrid dominoGrid, x, y int) (occupied bool) {
if newGrid.grid[y-1][x-1] != "X" {
fmt.Println("There is already a piece here. Choose a free set of coordinates.")
printGrid(newGrid)
return true
}
return false
}
func isSpaceNextToEquivalentEnd(newGrid dominoGrid, y, x int, end string) (spaceViable bool) {
fmt.Println("x: ", x, " y: ", y, " end: ", end)
//check space above if possible
if y != 1 {
if newGrid.grid[y-2][x-1] == end {
return true
}
}
//check space to the right if possible
if x != len(newGrid.grid[0]) {
if newGrid.grid[y-1][x] == end {
return true
}
}
//check space below if possible
if y != len(newGrid.grid) {
if newGrid.grid[y][x-1] == end {
return true
}
}
//check space to the left if possible
if x != 1 {
if newGrid.grid[y-1][x-2] == end {
return true
}
}
fmt.Println("There is no equivalent end next to this space. Select different coordinates.")
return false
}
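// expandGrid returns the grown grid. The "top"/"bot" cases reassign grid.grid,
// which is lost if the return value is discarded, so a caller that needs the
// expansion should write, for example, newGrid = expandGrid("top", newGrid).
// The "left"/"right" cases mutate the shared row slices and take effect
// either way.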
func expandGrid(edge string, grid dominoGrid) dominoGrid {
fmt.Println("== expand grid ", edge)
printGrid(grid)
switch edge {
case "top":
//add a fresh row of "X" on top and shift everything down by 1
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append([][]string{newRow}, grid.grid...)
case "right":
//add a column
for k, row := range grid.grid {
row = append(row, "X")
grid.grid[k] = row
}
case "bot":
//add a fresh row of "X" at the bottom
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append(grid.grid, newRow)
case "left":
//add a column and shift everything right by 1
for k := range grid.grid {
grid.grid[k] = append([]string{"X"}, grid.grid[k]...)
}
}
fmt.Println()
printGrid(grid)
return grid
}
func printDebug(game dominoGame) {
fmt.Println("--- game debug ---")
//printing data for DEBUG
fmt.Println("players: ", game.players)
fmt.Println("pieces: ", game.pieces)
fmt.Println("--- ---------- ---")
//printGrid(game.grid)
} | if firstTurn != 0 {
return game, firstTurn
} | random_line_split |
dominogame.go | package main
import (
"fmt"
"math/rand"
"strconv"
)
type dominoGame struct {
players []player
pieces []dominoPiece
grid dominoGrid
turnOrder []int
}
type player struct {
playerNumber int
ownedPieces []dominoPiece
}
type dominoPiece struct {
top int
bot int
}
type dominoGrid struct {
grid [][]string
}
func main() {
fmt.Println("-------- Domino Game --------")
//creating the game and players
game := generateNewGame(pickPlayers())
//start game
fmt.Println("---- Game Start! ----")
fmt.Println("game.turnOrder:", game.turnOrder)
game.playGame()
fmt.Println("---- Game End! ----")
printDebug(game)
}
func printGrid(grid dominoGrid) {
for _, v := range grid.grid {
fmt.Println(v)
}
}
func pickPlayers() int {
var numPlayers int
for {
fmt.Printf("\nHow many players?\n")
fmt.Scan(&numPlayers)
if numPlayers >= 2 && numPlayers <= 4 {
return numPlayers
}
fmt.Println("Invalid number of players. Please pick 2 to 4 players.")
}
}
func generateNewGame(numPlayers int) dominoGame {
var game dominoGame
//build the player objects
for i := 1; i < numPlayers+1; i++ {
game.players = append(game.players, player{playerNumber: i})
}
//build the domino pieces
k := 0
for i := 0; i < 7; i++ {
for j := k; j < 7; j++ {
game.pieces = append(game.pieces, dominoPiece{top: i, bot: j})
}
k++
}
//build a 3x3 grid to start, this will expand as pieces get placed
game.grid.grid = [][]string{{"X", "X", "X"}, {"X", "X", "X"}, {"X", "X", "X"}}
//shuffling the pieces
game, firstMove := assignPieces(game)
game.turnOrder = generateTurnOrder(firstMove, game.players)
return game
}
func assignPieces(gameRaw dominoGame) (dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces)) // Intn over the full length; len-1 would panic when only one piece is left
player.ownedPieces = append(player.ownedPieces, game.pieces[r])
firstTurn, highestDouble = firstMove(game.pieces[r], highestDouble, firstTurn, player.playerNumber)
game.pieces = remove(game.pieces, r)
}
game.players[k] = player
}
if firstTurn != 0 {
return game, firstTurn
}
}
}
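// Dealing sketch: with 4 players all 28 pieces are dealt, so the 6-6 double is
// always in somebody's hand and the loop ends on the first pass; with 2 or 3
// players a deal containing no double higher than 0-0 is reshuffled.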
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
func remove(s []dominoPiece, i int) []dominoPiece {
s[i] = s[len(s)-1]
return s[:len(s)-1]
}
func generateTurnOrder(firstMove int, players []player) (turnOrder []int) {
turnOrder = append(turnOrder, firstMove)
for _, player := range players {
if player.playerNumber != firstMove {
turnOrder = append(turnOrder, player.playerNumber)
}
}
return
}
func (game *dominoGame) playGame() {
firstTurn := true
var pickedPiece int
var newOwnedPieces []dominoPiece
var newGrid dominoGrid
for {
//players place their pieces down in specific turns
for _, playerNum := range game.turnOrder {
printGrid(game.grid)
if firstTurn {
//have to place the highest doubles piece for the first turn
highestDouble := getHighestDouble(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
break
}
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) |
func checkPiece(piece dominoPiece, grid dominoGrid) bool {
viable := false
for y := 1; y <= len(grid.grid)-2; y++ {
for x := 1; x <= len(grid.grid[0])-2; x++ {
//check if it could be matched with any domino on the board
if grid.grid[y][x] == strconv.Itoa(piece.top) || grid.grid[y][x] == strconv.Itoa(piece.bot) {
//check if there is room to place
if grid.grid[y+1][x] == "X" || grid.grid[y-1][x] == "X" || grid.grid[y][x+1] == "X" || grid.grid[y][x-1] == "X" {
viable = true
}
}
}
}
return viable
}
func selectPieceEnd(piece dominoPiece) (end string) {
for {
fmt.Println("Piece ", piece, " selected. Select end: Top -", piece.top, " Bot -", piece.bot)
fmt.Scan(&end)
endInt, err := strconv.Atoi(end)
if err != nil {
fmt.Println("Invalid input. Type in a number.")
continue
}
if endInt != piece.top && endInt != piece.bot {
fmt.Println("Invalid end. Select ", piece.top, " or ", piece.bot, ".")
continue
}
break
}
return
}
func isSpaceAlreadyOccupied(newGrid dominoGrid, x, y int) (occupied bool) {
if newGrid.grid[y-1][x-1] != "X" {
fmt.Println("There is already a piece here. Choose a free set of coordinates.")
printGrid(newGrid)
return true
}
return false
}
func isSpaceNextToEquivalentEnd(newGrid dominoGrid, y, x int, end string) (spaceViable bool) {
fmt.Println("x: ", x, " y: ", y, " end: ", end)
//check space above if possible
if y != 1 {
if newGrid.grid[y-2][x-1] == end {
return true
}
}
//check space to the right if possible
if x != len(newGrid.grid[0]) {
if newGrid.grid[y-1][x] == end {
return true
}
}
//check space below if possible
if y != len(newGrid.grid) {
if newGrid.grid[y][x-1] == end {
return true
}
}
//check space to the left if possible
if x != 1 {
if newGrid.grid[y-1][x-2] == end {
return true
}
}
fmt.Println("There is no equivalent end next to this space. Select different coordinates.")
return false
}
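// expandGrid returns the grown grid. The "top"/"bot" cases reassign grid.grid,
// which is lost if the return value is discarded, so a caller that needs the
// expansion should write, for example, newGrid = expandGrid("top", newGrid).
// The "left"/"right" cases mutate the shared row slices and take effect
// either way.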
func expandGrid(edge string, grid dominoGrid) dominoGrid {
fmt.Println("== expand grid ", edge)
printGrid(grid)
switch edge {
case "top":
//add a fresh row of "X" on top and shift everything down by 1
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append([][]string{newRow}, grid.grid...)
case "right":
//add a column
for k, row := range grid.grid {
row = append(row, "X")
grid.grid[k] = row
}
case "bot":
//add a fresh row of "X" at the bottom
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append(grid.grid, newRow)
case "left":
//add a column and shift everything right by 1
for k := range grid.grid {
grid.grid[k] = append([]string{"X"}, grid.grid[k]...)
}
}
fmt.Println()
printGrid(grid)
return grid
}
func printDebug(game dominoGame) {
fmt.Println("--- game debug ---")
//printing data for DEBUG
fmt.Println("players: ", game.players)
fmt.Println("pieces: ", game.pieces)
fmt.Println("--- ---------- ---")
//printGrid(game.grid)
}
| {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) {
fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
if x == len(newGrid.grid[0]) {
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) {
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
case 3:
if newGrid.grid[y][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y][x-1] = end2
if y == len(newGrid.grid)-1 {
expandGrid("bot", newGrid)
}
break
case 4:
if newGrid.grid[y-1][x-2] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x-2] = end2
if x == 2 {
expandGrid("left", newGrid)
}
break
default:
fmt.Println("Invalid orientation. Select one of the numbers for each side.")
continue
}
break
}
break
}
//overwrite with the new grid
grid = newGrid
printGrid(grid)
//remove piece from owned player pieces
for k, playerPiece := range playerPieces {
if playerPiece == piece {
playerPieces = remove(playerPieces, k)
break
}
}
return playerPieces, grid
} | identifier_body |
dominogame.go | package main
import (
"fmt"
"math/rand"
"strconv"
)
type dominoGame struct {
players []player
pieces []dominoPiece
grid dominoGrid
turnOrder []int
}
type player struct {
playerNumber int
ownedPieces []dominoPiece
}
type dominoPiece struct {
top int
bot int
}
type dominoGrid struct {
grid [][]string
}
func main() {
fmt.Println("-------- Domino Game --------")
//creating the game and players
game := generateNewGame(pickPlayers())
//start game
fmt.Println("---- Game Start! ----")
fmt.Println("game.turnOrder:", game.turnOrder)
game.playGame()
fmt.Println("---- Game End! ----")
printDebug(game)
}
func printGrid(grid dominoGrid) {
for _, v := range grid.grid {
fmt.Println(v)
}
}
func pickPlayers() int {
var numPlayers int
for {
fmt.Printf("\nHow many players?\n")
fmt.Scan(&numPlayers)
if numPlayers >= 2 && numPlayers <= 4 {
return numPlayers
}
fmt.Println("Invalid number of players. Please pick 2 to 4 players.")
}
}
func generateNewGame(numPlayers int) dominoGame {
var game dominoGame
//build the player objects
for i := 1; i < numPlayers+1; i++ {
game.players = append(game.players, player{playerNumber: i})
}
//build the domino pieces
k := 0
for i := 0; i < 7; i++ {
for j := k; j < 7; j++ {
game.pieces = append(game.pieces, dominoPiece{top: i, bot: j})
}
k++
}
//build a 3x3 grid to start, this will expand as pieces get placed
game.grid.grid = [][]string{{"X", "X", "X"}, {"X", "X", "X"}, {"X", "X", "X"}}
//shuffling the pieces
game, firstMove := assignPieces(game)
game.turnOrder = generateTurnOrder(firstMove, game.players)
return game
}
func | (gameRaw dominoGame) (dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces)) // Intn over the full length; len-1 would panic when only one piece is left
player.ownedPieces = append(player.ownedPieces, game.pieces[r])
firstTurn, highestDouble = firstMove(game.pieces[r], highestDouble, firstTurn, player.playerNumber)
game.pieces = remove(game.pieces, r)
}
game.players[k] = player
}
if firstTurn != 0 {
return game, firstTurn
}
}
}
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
func remove(s []dominoPiece, i int) []dominoPiece {
s[i] = s[len(s)-1]
return s[:len(s)-1]
}
func generateTurnOrder(firstMove int, players []player) (turnOrder []int) {
turnOrder = append(turnOrder, firstMove)
for _, player := range players {
if player.playerNumber != firstMove {
turnOrder = append(turnOrder, player.playerNumber)
}
}
return
}
func (game *dominoGame) playGame() {
firstTurn := true
var pickedPiece int
var newOwnedPieces []dominoPiece
var newGrid dominoGrid
for {
//players place their pieces down in specific turns
for _, playerNum := range game.turnOrder {
printGrid(game.grid)
if firstTurn {
//have to place the highest doubles piece for the first turn
highestDouble := getHighestDouble(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
break
}
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
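// getHighestDouble re-derives the opening piece from the dealt hands; because
// assignPieces only returns once somebody holds a double above 0-0, the piece
// found here is the one that decided firstTurn.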
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) {
fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
if x == len(newGrid.grid[0]) {
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) {
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
case 3:
if newGrid.grid[y][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y][x-1] = end2
if y == len(newGrid.grid)-1 {
expandGrid("bot", newGrid)
}
break
case 4:
if newGrid.grid[y-1][x-2] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x-2] = end2
if x == 2 {
expandGrid("left", newGrid)
}
break
default:
fmt.Println("Invalid orientation. Select one of the numbers for each side.")
continue
}
break
}
break
}
//overwrite with the new grid
grid = newGrid
printGrid(grid)
//remove piece from owned player pieces
for k, playerPiece := range playerPieces {
if playerPiece == piece {
playerPieces = remove(playerPieces, k)
break
}
}
return playerPieces, grid
}
func checkPiece(piece dominoPiece, grid dominoGrid) bool {
viable := false
for y := 1; y <= len(grid.grid)-2; y++ {
for x := 1; x <= len(grid.grid[0])-2; x++ {
//check if it could be matched with any domino on the board
if grid.grid[y][x] == strconv.Itoa(piece.top) || grid.grid[y][x] == strconv.Itoa(piece.bot) {
//check if there is room to place
if grid.grid[y+1][x] == "X" || grid.grid[y-1][x] == "X" || grid.grid[y][x+1] == "X" || grid.grid[y][x-1] == "X" {
viable = true
}
}
}
}
return viable
}
func selectPieceEnd(piece dominoPiece) (end string) {
for {
fmt.Println("Piece ", piece, " selected. Select end: Top -", piece.top, " Bot -", piece.bot)
fmt.Scan(&end)
endInt, err := strconv.Atoi(end)
if err != nil {
fmt.Println("Invalid input. Type in a number.")
continue
}
if endInt != piece.top && endInt != piece.bot {
fmt.Println("Invalid end. Select ", piece.top, " or ", piece.bot, ".")
continue
}
break
}
return
}
func isSpaceAlreadyOccupied(newGrid dominoGrid, x, y int) (occupied bool) {
if newGrid.grid[y-1][x-1] != "X" {
fmt.Println("There is already a piece here. Choose a free set of coordinates.")
printGrid(newGrid)
return true
}
return false
}
func isSpaceNextToEquivalentEnd(newGrid dominoGrid, y, x int, end string) (spaceViable bool) {
fmt.Println("x: ", x, " y: ", y, " end: ", end)
//check space above if possible
if y != 1 {
if newGrid.grid[y-2][x-1] == end {
return true
}
}
//check space to the right if possible
if x != len(newGrid.grid[0]) {
if newGrid.grid[y-1][x] == end {
return true
}
}
//check space below if possible
if y != len(newGrid.grid) {
if newGrid.grid[y][x-1] == end {
return true
}
}
//check space to the left if possible
if x != 1 {
if newGrid.grid[y-1][x-2] == end {
return true
}
}
fmt.Println("There is no equivalent end next to this space. Select different coordinates.")
return false
}
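// expandGrid returns the grown grid. The "top"/"bot" cases reassign grid.grid,
// which is lost if the return value is discarded, so a caller that needs the
// expansion should write, for example, newGrid = expandGrid("top", newGrid).
// The "left"/"right" cases mutate the shared row slices and take effect
// either way.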
func expandGrid(edge string, grid dominoGrid) dominoGrid {
fmt.Println("== expand grid ", edge)
printGrid(grid)
switch edge {
case "top":
//add a fresh row of "X" on top and shift everything down by 1
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append([][]string{newRow}, grid.grid...)
case "right":
//add a column
for k, row := range grid.grid {
row = append(row, "X")
grid.grid[k] = row
}
case "bot":
//add a fresh row of "X" at the bottom
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append(grid.grid, newRow)
case "left":
//add a column and shift everything right by 1
for k := range grid.grid {
grid.grid[k] = append([]string{"X"}, grid.grid[k]...)
}
}
fmt.Println()
printGrid(grid)
return grid
}
func printDebug(game dominoGame) {
fmt.Println("--- game debug ---")
//printing data for DEBUG
fmt.Println("players: ", game.players)
fmt.Println("pieces: ", game.pieces)
fmt.Println("--- ---------- ---")
//printGrid(game.grid)
}
| assignPieces | identifier_name |
dominogame.go | package main
import (
"fmt"
"math/rand"
"strconv"
)
type dominoGame struct {
players []player
pieces []dominoPiece
grid dominoGrid
turnOrder []int
}
type player struct {
playerNumber int
ownedPieces []dominoPiece
}
type dominoPiece struct {
top int
bot int
}
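// One domino: top and bot each range over 0..6, giving the 28 unique pieces of
// a double-six set (generated in generateNewGame below).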
type dominoGrid struct {
grid [][]string
}
func main() {
fmt.Println("-------- Domino Game --------")
//creating the game and players
game := generateNewGame(pickPlayers())
//start game
fmt.Println("---- Game Start! ----")
fmt.Println("game.turnOrder:", game.turnOrder)
game.playGame()
fmt.Println("---- Game End! ----")
printDebug(game)
}
func printGrid(grid dominoGrid) {
for _, v := range grid.grid {
fmt.Println(v)
}
}
func pickPlayers() int {
var numPlayers int
for {
fmt.Printf("\nHow many players?\n")
fmt.Scan(&numPlayers)
if numPlayers >= 2 && numPlayers <= 4 {
return numPlayers
}
fmt.Println("Invalid number of players. Please pick 2 to 4 players.")
}
}
func generateNewGame(numPlayers int) dominoGame {
var game dominoGame
//build the player objects
for i := 1; i < numPlayers+1; i++ {
game.players = append(game.players, player{playerNumber: i})
}
//build the domino pieces
k := 0
for i := 0; i < 7; i++ {
for j := k; j < 7; j++ {
game.pieces = append(game.pieces, dominoPiece{top: i, bot: j})
}
k++
}
//build a 3x3 grid to start, this will expand as pieces get placed
game.grid.grid = [][]string{{"X", "X", "X"}, {"X", "X", "X"}, {"X", "X", "X"}}
//shuffling the pieces
game, firstMove := assignPieces(game)
game.turnOrder = generateTurnOrder(firstMove, game.players)
return game
}
func assignPieces(gameRaw dominoGame) (dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces)) // Intn over the full length; len-1 would panic when only one piece is left
player.ownedPieces = append(player.ownedPieces, game.pieces[r])
firstTurn, highestDouble = firstMove(game.pieces[r], highestDouble, firstTurn, player.playerNumber)
game.pieces = remove(game.pieces, r)
}
game.players[k] = player
}
if firstTurn != 0 {
return game, firstTurn
}
}
}
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
func remove(s []dominoPiece, i int) []dominoPiece {
s[i] = s[len(s)-1]
return s[:len(s)-1]
}
func generateTurnOrder(firstMove int, players []player) (turnOrder []int) {
turnOrder = append(turnOrder, firstMove)
for _, player := range players {
if player.playerNumber != firstMove {
turnOrder = append(turnOrder, player.playerNumber)
}
}
return
}
func (game *dominoGame) playGame() {
firstTurn := true
var pickedPiece int
var newOwnedPieces []dominoPiece
var newGrid dominoGrid
for {
//players place their pieces down in specific turns
for _, playerNum := range game.turnOrder {
printGrid(game.grid)
if firstTurn {
//have to place the highest doubles piece for the first turn
highestDouble := getHighestDouble(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
break
}
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) {
fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
if x == len(newGrid.grid[0]) {
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) {
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
case 3:
if newGrid.grid[y][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y][x-1] = end2
if y == len(newGrid.grid)-1 {
expandGrid("bot", newGrid)
}
break
case 4:
if newGrid.grid[y-1][x-2] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x-2] = end2
if x == 2 {
expandGrid("left", newGrid)
}
break
default:
fmt.Println("Invalid orientation. Select one of the numbers for each side.")
continue
}
break
}
break
}
//overwrite with the new grid
grid = newGrid
printGrid(grid)
//remove piece from owned player pieces
for k, playerPiece := range playerPieces {
if playerPiece == piece {
playerPieces = remove(playerPieces, k)
break
}
}
return playerPieces, grid
}
func checkPiece(piece dominoPiece, grid dominoGrid) bool {
viable := false
for y := 1; y <= len(grid.grid)-2; y++ {
for x := 1; x <= len(grid.grid[0])-2; x++ {
//check if it could be matched with any domino on the board
if grid.grid[y][x] == strconv.Itoa(piece.top) || grid.grid[y][x] == strconv.Itoa(piece.bot) |
}
}
return viable
}
func selectPieceEnd(piece dominoPiece) (end string) {
for {
fmt.Println("Piece ", piece, " selected. Select end: Top -", piece.top, " Bot -", piece.bot)
fmt.Scan(&end)
endInt, err := strconv.Atoi(end)
if err != nil {
fmt.Println("Invalid input. Type in a number.")
continue
}
if endInt != piece.top && endInt != piece.bot {
fmt.Println("Invalid end. Select ", piece.top, " or ", piece.bot, ".")
continue
}
break
}
return
}
func isSpaceAlreadyOccupied(newGrid dominoGrid, x, y int) (occupied bool) {
if newGrid.grid[y-1][x-1] != "X" {
fmt.Println("There is already a piece here. Choose a free set of coordinates.")
printGrid(newGrid)
return true
}
return false
}
func isSpaceNextToEquivalentEnd(newGrid dominoGrid, y, x int, end string) (spaceViable bool) {
fmt.Println("x: ", x, " y: ", y, " end: ", end)
//check space above if possible
if y != 1 {
if newGrid.grid[y-2][x-1] == end {
return true
}
}
//check space to the right if possible
if x != len(newGrid.grid[0]) {
if newGrid.grid[y-1][x] == end {
return true
}
}
//check space below if possible
if y != len(newGrid.grid) {
if newGrid.grid[y][x-1] == end {
return true
}
}
//check space to the left if possible
if x != 1 {
if newGrid.grid[y-1][x-2] == end {
return true
}
}
fmt.Println("There is no equivalent end next to this space. Select different coordinates.")
return false
}
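// expandGrid returns the grown grid. The "top"/"bot" cases reassign grid.grid,
// which is lost if the return value is discarded, so a caller that needs the
// expansion should write, for example, newGrid = expandGrid("top", newGrid).
// The "left"/"right" cases mutate the shared row slices and take effect
// either way.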
func expandGrid(edge string, grid dominoGrid) dominoGrid {
fmt.Println("== expand grid ", edge)
printGrid(grid)
switch edge {
case "top":
//add a fresh row of "X" on top and shift everything down by 1
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append([][]string{newRow}, grid.grid...)
case "right":
//add a column
for k, row := range grid.grid {
row = append(row, "X")
grid.grid[k] = row
}
case "bot":
//add a fresh row of "X" at the bottom
newRow := make([]string, len(grid.grid[0]))
for i := range newRow {
newRow[i] = "X"
}
grid.grid = append(grid.grid, newRow)
case "left":
//add a column and shift everything right by 1
for k := range grid.grid {
grid.grid[k] = append([]string{"X"}, grid.grid[k]...)
}
}
fmt.Println()
printGrid(grid)
return grid
}
func printDebug(game dominoGame) {
fmt.Println("--- game debug ---")
//printing data for DEBUG
fmt.Println("players: ", game.players)
fmt.Println("pieces: ", game.pieces)
fmt.Println("--- ---------- ---")
//printGrid(game.grid)
}
| {
//check if there is room to place
if grid.grid[y+1][x] == "X" || grid.grid[y-1][x] == "X" || grid.grid[y][x+1] == "X" || grid.grid[y][x-1] == "X" {
viable = true
}
} | conditional_block |
lib.rs | //! An element-tree style XML library
//!
//! # Examples
//!
//! ## Reading
//!
//! ```
//! use treexml::Document;
//!
//! let doc_raw = r#"
//! <?xml version="1.1" encoding="UTF-8"?>
//! <table>
//! <fruit type="apple">worm</fruit>
//! <vegetable />
//! </table>
//! "#;
//!
//! let doc = Document::parse(doc_raw.as_bytes()).unwrap();
//! let root = doc.root.unwrap();
//!
//! let fruit = root.find_child(|tag| tag.name == "fruit").unwrap().clone();
//! println!("{} [{:?}] = {}", fruit.name, fruit.attributes, fruit.text.unwrap());
//! ```
//!
//! ## Writing
//!
//! ```
//! use treexml::{Document, Element};
//!
//! let mut root = Element::new("root");
//! let mut child = Element::new("child");
//! child.text = Some("contents".to_owned());
//! root.children.push(child);
//!
//! let doc = Document{
//! root: Some(root),
//! .. Document::default()
//! };
//!
//! println!("{}", doc);
//! ```
//!
//!
// `error_chain!` can recurse deeply
#![recursion_limit = "1024"]
#[macro_use]
extern crate failure;
extern crate indexmap;
mod errors;
extern crate xml;
mod builder;
use std::borrow::Cow;
use std::fmt;
use std::io::{Read, Write};
use std::iter::Filter;
use std::slice::{Iter, IterMut};
use std::str::FromStr;
use std::string::ToString; |
use indexmap::IndexMap;
use xml::common::XmlVersion as BaseXmlVersion;
/// Enumeration of XML versions
///
/// This exists solely because `xml-rs`'s `XmlVersion` doesn't implement Debug
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML Version 1.0
Version10,
/// XML Version 1.1
Version11,
}
impl From<BaseXmlVersion> for XmlVersion {
fn from(value: BaseXmlVersion) -> XmlVersion {
match value {
BaseXmlVersion::Version10 => XmlVersion::Version10,
BaseXmlVersion::Version11 => XmlVersion::Version11,
}
}
}
impl From<XmlVersion> for BaseXmlVersion {
fn from(value: XmlVersion) -> BaseXmlVersion {
match value {
XmlVersion::Version10 => BaseXmlVersion::Version10,
XmlVersion::Version11 => BaseXmlVersion::Version11,
}
}
}
/// An XML element
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Element {
/// Tag prefix, used for namespacing: `xsl` in `xsl:for-each`
pub prefix: Option<String>,
/// Tag name: `for-each` in `xsl:for-each`
pub name: String,
/// Tag attributes
pub attributes: IndexMap<String, String>,
/// A vector of child elements
pub children: Vec<Element>,
/// Contents of the element
pub text: Option<String>,
/// CDATA contents of the element
pub cdata: Option<String>,
}
impl Default for Element {
fn default() -> Self {
Element {
prefix: None,
name: "tag".to_owned(),
attributes: IndexMap::new(),
children: Vec::new(),
text: None,
cdata: None,
}
}
}
impl Element {
/// Create a new `Element` with the tag name `name`
pub fn new<S>(name: S) -> Element
where
S: ToString,
{
Element {
name: name.to_string(),
..Element::default()
}
}
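// Construction sketch: `Element::new("fruit")` yields a tag named "fruit" with
// no attributes, children, text or cdata; callers fill those fields in
// afterwards, as in the crate-level "Writing" example.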
/// Parse the contents of an element
fn parse<R: Read>(
&mut self,
mut reader: &mut xml::reader::EventReader<R>,
) -> Result<(), Error> {
use xml::reader::XmlEvent;
loop {
let ev = reader.next()?;
match ev {
XmlEvent::StartElement {
name, attributes, ..
} => {
let mut attr_map = IndexMap::new();
for attr in attributes {
let attr_name = match attr.name.prefix {
Some(prefix) => format!("{}:{}", prefix, attr.name.local_name),
None => attr.name.local_name,
};
attr_map.insert(attr_name, attr.value);
}
let mut child = Element {
prefix: name.prefix,
name: name.local_name,
attributes: attr_map,
..Element::default()
};
child.parse(&mut reader)?;
self.children.push(child);
}
XmlEvent::EndElement { name } => {
if name.prefix == self.prefix && name.local_name == self.name {
return Ok(());
} else {
// This should never happen, since the base xml library will panic first
panic!("Unexpected closing tag: {}, expected {}", name, self.name);
}
}
XmlEvent::Characters(s) => {
let text = match self.text {
Some(ref v) => v.clone(),
None => String::new(),
};
self.text = Some(text + &s);
}
XmlEvent::CData(s) => {
let cdata = match self.cdata {
Some(ref v) => v.clone(),
None => String::new(),
};
self.cdata = Some(cdata + &s);
}
XmlEvent::StartDocument { .. }
| XmlEvent::EndDocument
| XmlEvent::ProcessingInstruction { .. }
| XmlEvent::Whitespace(_)
| XmlEvent::Comment(_) => {}
}
}
}
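// Parsing sketch: the loop above recurses on StartElement and returns on the
// matching EndElement; sibling text/CDATA chunks are concatenated in document
// order, e.g. `<a>x<b/>y</a>` parses with text == Some("xy".to_owned()).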
/// Write an element and its contents to `writer`
fn write<W: Write>(&self, writer: &mut xml::writer::EventWriter<W>) -> Result<(), Error> {
use xml::attribute::Attribute;
use xml::name::Name;
use xml::namespace::Namespace;
use xml::writer::XmlEvent;
let name = Name::local(&self.name);
let mut attributes = Vec::with_capacity(self.attributes.len());
for (k, v) in &self.attributes {
attributes.push(Attribute {
name: Name::local(k),
value: v,
});
}
let namespace = Namespace::empty();
writer.write(XmlEvent::StartElement {
name: name,
attributes: Cow::Owned(attributes),
namespace: Cow::Owned(namespace),
})?;
if let Some(ref text) = self.text {
writer.write(XmlEvent::Characters(&text[..]))?;
}
if let Some(ref cdata) = self.cdata {
writer.write(XmlEvent::CData(&cdata[..]))?;
}
for e in &self.children {
e.write(writer)?;
}
writer.write(XmlEvent::EndElement { name: Some(name) })?;
Ok(())
}
/// Find a single child of the current `Element`, given a predicate
pub fn find_child<P>(&self, predicate: P) -> Option<&Element>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().find(predicate)
}
/// Find a single child of the current `Element`, given a predicate; returns a mutable borrow
pub fn find_child_mut<P>(&mut self, predicate: P) -> Option<&mut Element>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().find(predicate)
}
/// Traverse element using an xpath-like string: root/child/a
pub fn find(&self, path: &str) -> Result<&Element, Error> {
Self::find_path(&path.split('/').collect::<Vec<&str>>(), path, self)
}
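// Usage sketch: for `<root><child><a/></child></root>`,
// `root.find("child/a")` returns the `<a/>` element, while a missing segment
// such as `root.find("child/b")` yields Error::ElementNotFound.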
pub fn find_value<T: FromStr>(&self, path: &str) -> Result<Option<T>, Error> {
let el = self.find(path)?;
if let Some(text) = el.text.as_ref() {
match T::from_str(text) {
Err(_) => Err(errors::Error::ValueFromStr {
t: text.to_string(),
}.into()),
Ok(value) => Ok(Some(value)),
}
} else {
Ok(None)
}
}
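// find_value sketch: for `<config><retries>3</retries></config>`,
// `config.find_value::<u32>("retries")` is `Ok(Some(3))`; an element with no
// text gives `Ok(None)` and unparsable text an `Error::ValueFromStr`.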
fn find_path<'a>(
path: &[&str],
original: &str,
tree: &'a Element,
) -> Result<&'a Element, Error> {
if path.is_empty() {
return Ok(tree);
}
match tree.find_child(|t| t.name == path[0]) {
Some(element) => Self::find_path(&path[1..], original, element),
None => Err(errors::Error::ElementNotFound { t: original.into() }.into()),
}
}
/// Filters the children of the current `Element`, given a predicate
pub fn filter_children<P>(&self, predicate: P) -> Filter<Iter<Element>, P>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().filter(predicate)
}
/// Filters the children of the current `Element`, given a predicate; returns a mutable iterator
pub fn filter_children_mut<P>(&mut self, predicate: P) -> Filter<IterMut<Element>, P>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().filter(predicate)
}
}
impl fmt::Display for Element {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let doc = Document {
root: Some(self.clone()),
..Document::default()
};
let mut v = Vec::<u8>::new();
doc.write_with(&mut v, false, " ", true).unwrap();
let s = String::from_utf8(v).unwrap();
f.write_str(&s[..])
}
}
/// An XML document
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Document {
/// Version of the XML document
pub version: XmlVersion,
/// Encoding of the XML document
pub encoding: String,
/// Root tag of the XML document
pub root: Option<Element>,
}
impl Default for Document {
fn default() -> Self {
Document {
version: XmlVersion::Version10,
encoding: "UTF-8".to_owned(),
root: None,
}
}
}
impl Document {
/// Create a new `Document` with default values
pub fn new() -> Document {
Document {
..Document::default()
}
}
/// Create a new `Document` with an Element or ElementBuilder at its root.
pub fn build(root: &mut ElementBuilder) -> Self {
Document {
root: Some(root.element()),
..Self::default()
}
}
/// Parse data from a reader to construct an XML document
///
/// # Failures
///
/// Passes any errors that the `xml-rs` library returns up the stack
pub fn parse<R: Read>(r: R) -> Result<Document, Error> {
use xml::reader::{EventReader, XmlEvent};
let mut reader = EventReader::new(r);
let mut doc = Document::new();
loop {
let ev = reader.next()?;
match ev {
XmlEvent::StartDocument {
version, encoding, ..
} => {
doc.version = XmlVersion::from(version);
doc.encoding = encoding;
}
XmlEvent::StartElement {
name, attributes, ..
} => {
// Start of the root element
let mut attr_map = IndexMap::new();
for attr in attributes {
let attr_name = match attr.name.prefix {
Some(prefix) => format!("{}:{}", prefix, attr.name.local_name),
None => attr.name.local_name,
};
attr_map.insert(attr_name, attr.value);
}
let mut root = Element {
prefix: name.prefix,
name: name.local_name,
attributes: attr_map,
..Element::default()
};
root.parse(&mut reader)?;
doc.root = Some(root);
}
XmlEvent::EndDocument => break,
_ => {}
}
}
Ok(doc)
}
pub fn write<W: Write>(&self, mut w: &mut W) -> Result<(), Error> {
self.write_with(&mut w, true, " ", true)
}
/// Writes a document to `w`
pub fn write_with<W: Write>(
&self,
w: &mut W,
document_decl: bool,
indent_str: &'static str,
indent: bool,
) -> Result<(), Error> {
use xml::writer::{EmitterConfig, XmlEvent};
let mut writer = EmitterConfig::new()
.perform_indent(indent)
.write_document_declaration(document_decl)
.indent_string(indent_str)
.create_writer(w);
if document_decl {
writer.write(XmlEvent::StartDocument {
version: self.version.into(),
encoding: Some(&self.encoding),
standalone: None,
})?;
}
if let Some(ref e) = self.root {
e.write(&mut writer)?;
}
Ok(())
}
}
impl fmt::Display for Document {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut v = Vec::<u8>::new();
self.write(&mut v).unwrap();
let s = String::from_utf8(v).unwrap();
f.write_str(&s[..])
}
} |
pub use errors::*;
pub use builder::*; | random_line_split |
lib.rs | //! An element-tree style XML library
//!
//! # Examples
//!
//! ## Reading
//!
//! ```
//! use treexml::Document;
//!
//! let doc_raw = r#"
//! <?xml version="1.1" encoding="UTF-8"?>
//! <table>
//! <fruit type="apple">worm</fruit>
//! <vegetable />
//! </table>
//! "#;
//!
//! let doc = Document::parse(doc_raw.as_bytes()).unwrap();
//! let root = doc.root.unwrap();
//!
//! let fruit = root.find_child(|tag| tag.name == "fruit").unwrap().clone();
//! println!("{} [{:?}] = {}", fruit.name, fruit.attributes, fruit.text.unwrap());
//! ```
//!
//! ## Writing
//!
//! ```
//! use treexml::{Document, Element};
//!
//! let mut root = Element::new("root");
//! let mut child = Element::new("child");
//! child.text = Some("contents".to_owned());
//! root.children.push(child);
//!
//! let doc = Document{
//! root: Some(root),
//! .. Document::default()
//! };
//!
//! println!("{}", doc);
//! ```
//!
//!
// `error_chain!` can recurse deeply
#![recursion_limit = "1024"]
#[macro_use]
extern crate failure;
extern crate indexmap;
mod errors;
extern crate xml;
mod builder;
use std::borrow::Cow;
use std::fmt;
use std::io::{Read, Write};
use std::iter::Filter;
use std::slice::{Iter, IterMut};
use std::str::FromStr;
use std::string::ToString;
pub use errors::*;
pub use builder::*;
use indexmap::IndexMap;
use xml::common::XmlVersion as BaseXmlVersion;
/// Enumeration of XML versions
///
/// This exists solely because `xml-rs`'s `XmlVersion` doesn't implement Debug
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML Version 1.0
Version10,
/// XML Version 1.1
Version11,
}
impl From<BaseXmlVersion> for XmlVersion {
fn from(value: BaseXmlVersion) -> XmlVersion {
match value {
BaseXmlVersion::Version10 => XmlVersion::Version10,
BaseXmlVersion::Version11 => XmlVersion::Version11,
}
}
}
impl From<XmlVersion> for BaseXmlVersion {
fn | (value: XmlVersion) -> BaseXmlVersion {
match value {
XmlVersion::Version10 => BaseXmlVersion::Version10,
XmlVersion::Version11 => BaseXmlVersion::Version11,
}
}
}
/// An XML element
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Element {
/// Tag prefix, used for namespacing: `xsl` in `xsl:for-each`
pub prefix: Option<String>,
/// Tag name: `for-each` in `xsl:for-each`
pub name: String,
/// Tag attributes
pub attributes: IndexMap<String, String>,
/// A vector of child elements
pub children: Vec<Element>,
/// Contents of the element
pub text: Option<String>,
/// CDATA contents of the element
pub cdata: Option<String>,
}
impl Default for Element {
fn default() -> Self {
Element {
prefix: None,
name: "tag".to_owned(),
attributes: IndexMap::new(),
children: Vec::new(),
text: None,
cdata: None,
}
}
}
impl Element {
/// Create a new `Element` with the tag name `name`
pub fn new<S>(name: S) -> Element
where
S: ToString,
{
Element {
name: name.to_string(),
..Element::default()
}
}
/// Parse the contents of an element
fn parse<R: Read>(
&mut self,
mut reader: &mut xml::reader::EventReader<R>,
) -> Result<(), Error> {
use xml::reader::XmlEvent;
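// Walk reader events until this element's matching EndElement arrives: nested
// StartElements recurse into child.parse(), while Characters and CData events
// accumulate onto this element's text and cdata fields.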
loop {
let ev = reader.next()?;
match ev {
XmlEvent::StartElement {
name, attributes, ..
} => {
let mut attr_map = IndexMap::new();
for attr in attributes {
let attr_name = match attr.name.prefix {
Some(prefix) => format!("{}:{}", prefix, attr.name.local_name),
None => attr.name.local_name,
};
attr_map.insert(attr_name, attr.value);
}
let mut child = Element {
prefix: name.prefix,
name: name.local_name,
attributes: attr_map,
..Element::default()
};
child.parse(&mut reader)?;
self.children.push(child);
}
XmlEvent::EndElement { name } => {
if name.prefix == self.prefix && name.local_name == self.name {
return Ok(());
} else {
// This should never happen, since the base xml library will panic first
panic!("Unexpected closing tag: {}, expected {}", name, self.name);
}
}
XmlEvent::Characters(s) => {
let text = match self.text {
Some(ref v) => v.clone(),
None => String::new(),
};
self.text = Some(text + &s);
}
XmlEvent::CData(s) => {
let cdata = match self.cdata {
Some(ref v) => v.clone(),
None => String::new(),
};
self.cdata = Some(cdata + &s);
}
XmlEvent::StartDocument { .. }
| XmlEvent::EndDocument
| XmlEvent::ProcessingInstruction { .. }
| XmlEvent::Whitespace(_)
| XmlEvent::Comment(_) => {}
}
}
}
/// Write an element and its contents to `writer`
fn write<W: Write>(&self, writer: &mut xml::writer::EventWriter<W>) -> Result<(), Error> {
use xml::attribute::Attribute;
use xml::name::Name;
use xml::namespace::Namespace;
use xml::writer::XmlEvent;
let name = Name::local(&self.name);
let mut attributes = Vec::with_capacity(self.attributes.len());
for (k, v) in &self.attributes {
attributes.push(Attribute {
name: Name::local(k),
value: v,
});
}
let namespace = Namespace::empty();
writer.write(XmlEvent::StartElement {
name: name,
attributes: Cow::Owned(attributes),
namespace: Cow::Owned(namespace),
})?;
if let Some(ref text) = self.text {
writer.write(XmlEvent::Characters(&text[..]))?;
}
if let Some(ref cdata) = self.cdata {
writer.write(XmlEvent::CData(&cdata[..]))?;
}
for e in &self.children {
e.write(writer)?;
}
writer.write(XmlEvent::EndElement { name: Some(name) })?;
Ok(())
}
/// Find a single child of the current `Element`, given a predicate
pub fn find_child<P>(&self, predicate: P) -> Option<&Element>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().find(predicate)
}
/// Find a single child of the current `Element`, given a predicate; returns a mutable borrow
pub fn find_child_mut<P>(&mut self, predicate: P) -> Option<&mut Element>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().find(predicate)
}
/// Traverse element using an xpath-like string: root/child/a
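///
/// A minimal usage sketch (mirroring the crate-level parsing example; the path
/// is resolved against this element's children):
///
/// ```
/// use treexml::Document;
///
/// let doc = Document::parse(r#"<table><fruit>worm</fruit></table>"#.as_bytes()).unwrap();
/// let root = doc.root.unwrap();
/// let fruit = root.find("fruit").unwrap();
/// assert_eq!(fruit.text, Some("worm".to_string()));
/// ```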
pub fn find(&self, path: &str) -> Result<&Element, Error> {
Self::find_path(&path.split('/').collect::<Vec<&str>>(), path, self)
}
pub fn find_value<T: FromStr>(&self, path: &str) -> Result<Option<T>, Error> {
let el = self.find(path)?;
if let Some(text) = el.text.as_ref() {
match T::from_str(text) {
Err(_) => Err(errors::Error::ValueFromStr {
t: text.to_string(),
}.into()),
Ok(value) => Ok(Some(value)),
}
} else {
Ok(None)
}
}
fn find_path<'a>(
path: &[&str],
original: &str,
tree: &'a Element,
) -> Result<&'a Element, Error> {
if path.is_empty() {
return Ok(tree);
}
match tree.find_child(|t| t.name == path[0]) {
Some(element) => Self::find_path(&path[1..], original, element),
None => Err(errors::Error::ElementNotFound { t: original.into() }.into()),
}
}
/// Filters the children of the current `Element`, given a predicate
pub fn filter_children<P>(&self, predicate: P) -> Filter<Iter<Element>, P>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().filter(predicate)
}
/// Filters the children of the current `Element`, given a predicate; returns a mutable iterator
pub fn filter_children_mut<P>(&mut self, predicate: P) -> Filter<IterMut<Element>, P>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().filter(predicate)
}
}
impl fmt::Display for Element {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let doc = Document {
root: Some(self.clone()),
..Document::default()
};
let mut v = Vec::<u8>::new();
doc.write_with(&mut v, false, " ", true).unwrap();
let s = String::from_utf8(v).unwrap();
f.write_str(&s[..])
}
}
/// An XML document
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Document {
/// Version of the XML document
pub version: XmlVersion,
/// Encoding of the XML document
pub encoding: String,
/// Root tag of the XML document
pub root: Option<Element>,
}
impl Default for Document {
fn default() -> Self {
Document {
version: XmlVersion::Version10,
encoding: "UTF-8".to_owned(),
root: None,
}
}
}
impl Document {
/// Create a new `Document` with default values
pub fn new() -> Document {
Document {
..Document::default()
}
}
/// Create a new `Document` with an Element or ElementBuilder at its root.
pub fn build(root: &mut ElementBuilder) -> Self {
Document {
root: Some(root.element()),
..Self::default()
}
}
/// Parse data from a reader to construct an XML document
///
/// # Failures
///
/// Passes any errors that the `xml-rs` library returns up the stack
pub fn parse<R: Read>(r: R) -> Result<Document, Error> {
use xml::reader::{EventReader, XmlEvent};
let mut reader = EventReader::new(r);
let mut doc = Document::new();
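// Pull events until EndDocument: StartDocument fills in version/encoding, and the
// first StartElement becomes the root, delegating its subtree to Element::parse.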
loop {
let ev = reader.next()?;
match ev {
XmlEvent::StartDocument {
version, encoding, ..
} => {
doc.version = XmlVersion::from(version);
doc.encoding = encoding;
}
XmlEvent::StartElement {
name, attributes, ..
} => {
// Start of the root element
let mut attr_map = IndexMap::new();
for attr in attributes {
let attr_name = match attr.name.prefix {
Some(prefix) => format!("{}:{}", prefix, attr.name.local_name),
None => attr.name.local_name,
};
attr_map.insert(attr_name, attr.value);
}
let mut root = Element {
prefix: name.prefix,
name: name.local_name,
attributes: attr_map,
..Element::default()
};
root.parse(&mut reader)?;
doc.root = Some(root);
}
XmlEvent::EndDocument => break,
_ => {}
}
}
Ok(doc)
}
pub fn write<W: Write>(&self, mut w: &mut W) -> Result<(), Error> {
self.write_with(&mut w, true, " ", true)
}
/// Writes a document to `w`
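///
/// `document_decl` controls whether the leading `<?xml ...?>` declaration is
/// emitted, `indent_str` is the string used for one level of indentation, and
/// `indent` toggles pretty-printing. For example, the `Display` impl for
/// `Element` calls `write_with(&mut v, false, " ", true)` to skip the
/// declaration while still indenting.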
pub fn write_with<W: Write>(
&self,
w: &mut W,
document_decl: bool,
indent_str: &'static str,
indent: bool,
) -> Result<(), Error> {
use xml::writer::{EmitterConfig, XmlEvent};
let mut writer = EmitterConfig::new()
.perform_indent(indent)
.write_document_declaration(document_decl)
.indent_string(indent_str)
.create_writer(w);
if document_decl {
writer.write(XmlEvent::StartDocument {
version: self.version.into(),
encoding: Some(&self.encoding),
standalone: None,
})?;
}
if let Some(ref e) = self.root {
e.write(&mut writer)?;
}
Ok(())
}
}
impl fmt::Display for Document {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut v = Vec::<u8>::new();
self.write(&mut v).unwrap();
let s = String::from_utf8(v).unwrap();
f.write_str(&s[..])
}
}
| from | identifier_name |
metamandering_north_carolina.py | import os
import random
import json
import geopandas as gpd
import functools
import datetime
import matplotlib
from facefinder import *
import time
import requests
import zipfile
import io
import matplotlib.pyplot as plt
import numpy as np
import csv
from networkx.readwrite import json_graph
import math
import seaborn as sns
from functools import partial
import networkx as nx
import numpy as np
import copy
from gerrychain.tree import bipartition_tree as bpt
from gerrychain import Graph
from gerrychain import MarkovChain
from gerrychain.constraints import (Validator, single_flip_contiguous,
within_percent_of_ideal_population, UpperBound)
from gerrychain.proposals import propose_random_flip, propose_chunk_flip
from gerrychain.accept import always_accept
from gerrychain.updaters import Election, Tally, cut_edges
from gerrychain import GeographicPartition
from gerrychain.partition import Partition
from gerrychain.proposals import recom
from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def | (partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
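# Constrain every district to within pop1 (5%) of the ideal district population.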
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['population'] = 0
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(graph,range(num_dist),total_pop/num_dist,"population", .01,1)
for node in graph.nodes():
graph.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
z = 0
num_cuts_list = []
seats_won_table = []
best_left = np.inf
best_right = -np.inf
print("begin chain")
for part in exp_chain:
z += 1
if z % 100 == 0:
print("step ", z)
seats_won = 0
for edge in part["cut_edges"]:
graph[edge[0]][edge[1]]["cut_times"] += 1
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
#left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
#right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
plt.figure()
plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram_metamander" + tag +".png"
plt.savefig(name)
plt.close()
edge_colors = [graph[edge[0]][edge[1]]["cut_times"] for edge in graph.edges()]
pos=nx.get_node_attributes(graph, 'pos')
plt.figure()
nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), node_size=1,
edge_color=edge_colors, node_shape='s',
cmap='magma', width=3)
plt.savefig("./plots/edges" + tag + ".png")
plt.close()
return
def main():
graph, dual = preprocessing("jsons/NC.json")
left_mander, right_mander = produce_gerrymanders(graph,12,'_nc',100,'tree')
hold_graph = copy.deepcopy(graph)
hold_dual = copy.deepcopy(dual)
num_dist = 13
metamander , k = metamander_around_partition(graph, dual, left_mander, '_nc' + "LEFTMANDER",num_dist)
produce_sample(metamander, k , '_nc')
main()
| step_num | identifier_name |
metamandering_north_carolina.py | import os
import random
import json
import geopandas as gpd
import functools
import datetime
import matplotlib
from facefinder import *
import time
import requests
import zipfile
import io
import matplotlib.pyplot as plt
import numpy as np
import csv
from networkx.readwrite import json_graph
import math
import seaborn as sns
from functools import partial
import networkx as nx
import numpy as np
import copy
from gerrychain.tree import bipartition_tree as bpt
from gerrychain import Graph
from gerrychain import MarkovChain
from gerrychain.constraints import (Validator, single_flip_contiguous,
within_percent_of_ideal_population, UpperBound)
from gerrychain.proposals import propose_random_flip, propose_chunk_flip
from gerrychain.accept import always_accept
from gerrychain.updaters import Election, Tally, cut_edges
from gerrychain import GeographicPartition
from gerrychain.partition import Partition
from gerrychain.proposals import recom
from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
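# Overall flow: color nodes by the target partition, mark dual-graph faces within
# distance 2 of a crossing edge as special, refine those faces with
# face_sierpinski_mesh, then seed a fresh num_dist-district plan via recursive_tree_part.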
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
|
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(graph,range(num_dist),total_pop/num_dist,"population", .01,1)
for node in graph.nodes():
graph.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
z = 0
num_cuts_list = []
seats_won_table = []
best_left = np.inf
best_right = -np.inf
print("begin chain")
for part in exp_chain:
z += 1
if z % 100 == 0:
print("step ", z)
seats_won = 0
for edge in part["cut_edges"]:
graph[edge[0]][edge[1]]["cut_times"] += 1
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
#left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
#right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
plt.figure()
plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram_metamander" + tag +".png"
plt.savefig(name)
plt.close()
edge_colors = [graph[edge[0]][edge[1]]["cut_times"] for edge in graph.edges()]
pos=nx.get_node_attributes(graph, 'pos')
plt.figure()
nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), node_size=1,
edge_color=edge_colors, node_shape='s',
cmap='magma', width=3)
plt.savefig("./plots/edges" + tag + ".png")
plt.close()
return
def main():
graph, dual = preprocessing("jsons/NC.json")
left_mander, right_mander = produce_gerrymanders(graph,12,'_nc',100,'tree')
hold_graph = copy.deepcopy(graph)
hold_dual = copy.deepcopy(dual)
num_dist = 13
metamander , k = metamander_around_partition(graph, dual, left_mander, '_nc' + "LEFTMANDER",num_dist)
produce_sample(metamander, k , '_nc')
main()
| g_sierpinsky.nodes[node]['population'] = 0 | conditional_block |
metamandering_north_carolina.py | import os
import random
import json
import geopandas as gpd
import functools
import datetime
import matplotlib
from facefinder import *
import time
import requests
import zipfile
import io
import matplotlib.pyplot as plt
import numpy as np
import csv
from networkx.readwrite import json_graph
import math | import numpy as np
import copy
from gerrychain.tree import bipartition_tree as bpt
from gerrychain import Graph
from gerrychain import MarkovChain
from gerrychain.constraints import (Validator, single_flip_contiguous,
within_percent_of_ideal_population, UpperBound)
from gerrychain.proposals import propose_random_flip, propose_chunk_flip
from gerrychain.accept import always_accept
from gerrychain.updaters import Election, Tally, cut_edges
from gerrychain import GeographicPartition
from gerrychain.partition import Partition
from gerrychain.proposals import recom
from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['population'] = 0
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(graph,range(num_dist),total_pop/num_dist,"population", .01,1)
for node in graph.nodes():
graph.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
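# cut_times counts how often an edge lies on a district boundary across the chain;
# it is used below to color the edge plot.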
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
z = 0
num_cuts_list = []
seats_won_table = []
best_left = np.inf
best_right = -np.inf
print("begin chain")
for part in exp_chain:
z += 1
if z % 100 == 0:
print("step ", z)
seats_won = 0
for edge in part["cut_edges"]:
graph[edge[0]][edge[1]]["cut_times"] += 1
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
#left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
#right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
plt.figure()
plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram_metamander" + tag +".png"
plt.savefig(name)
plt.close()
edge_colors = [graph[edge[0]][edge[1]]["cut_times"] for edge in graph.edges()]
pos=nx.get_node_attributes(graph, 'pos')
plt.figure()
nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), node_size=1,
edge_color=edge_colors, node_shape='s',
cmap='magma', width=3)
plt.savefig("./plots/edges" + tag + ".png")
plt.close()
return
def main():
graph, dual = preprocessing("jsons/NC.json")
left_mander, right_mander = produce_gerrymanders(graph,12,'_nc',100,'tree')
hold_graph = copy.deepcopy(graph)
hold_dual = copy.deepcopy(dual)
num_dist = 13
metamander , k = metamander_around_partition(graph, dual, left_mander, '_nc' + "LEFTMANDER",num_dist)
produce_sample(metamander, k , '_nc')
main() | import seaborn as sns
from functools import partial
import networkx as nx | random_line_split |
metamandering_north_carolina.py | import os
import random
import json
import geopandas as gpd
import functools
import datetime
import matplotlib
from facefinder import *
import time
import requests
import zipfile
import io
import matplotlib.pyplot as plt
import numpy as np
import csv
from networkx.readwrite import json_graph
import math
import seaborn as sns
from functools import partial
import networkx as nx
import numpy as np
import copy
from gerrychain.tree import bipartition_tree as bpt
from gerrychain import Graph
from gerrychain import MarkovChain
from gerrychain.constraints import (Validator, single_flip_contiguous,
within_percent_of_ideal_population, UpperBound)
from gerrychain.proposals import propose_random_flip, propose_chunk_flip
from gerrychain.accept import always_accept
from gerrychain.updaters import Election, Tally, cut_edges
from gerrychain import GeographicPartition
from gerrychain.partition import Partition
from gerrychain.proposals import recom
from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
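# Track the most extreme plans seen so far: best_left / left_mander (fewest
# Republican seats) and best_right / right_mander (most).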
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
|
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
z = 0
num_cuts_list = []
seats_won_table = []
best_left = np.inf
best_right = -np.inf
print("begin chain")
for part in exp_chain:
z += 1
if z % 100 == 0:
print("step ", z)
seats_won = 0
for edge in part["cut_edges"]:
graph[edge[0]][edge[1]]["cut_times"] += 1
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
#left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
#right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
plt.figure()
plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram_metamander" + tag +".png"
plt.savefig(name)
plt.close()
edge_colors = [graph[edge[0]][edge[1]]["cut_times"] for edge in graph.edges()]
pos=nx.get_node_attributes(graph, 'pos')
plt.figure()
nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), node_size=1,
edge_color=edge_colors, node_shape='s',
cmap='magma', width=3)
plt.savefig("./plots/edges" + tag + ".png")
plt.close()
return
def main():
graph, dual = preprocessing("jsons/NC.json")
left_mander, right_mander = produce_gerrymanders(graph,12,'_nc',100,'tree')
hold_graph = copy.deepcopy(graph)
hold_dual = copy.deepcopy(dual)
num_dist = 13
metamander , k = metamander_around_partition(graph, dual, left_mander, '_nc' + "LEFTMANDER",num_dist)
produce_sample(metamander, k , '_nc')
main()
| updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['population'] = 0
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(graph,range(num_dist),total_pop/num_dist,"population", .01,1)
for node in graph.nodes():
graph.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k | identifier_body |
component.rs | use crate::code::CodeObject;
use crate::signatures::SignatureCollection;
use crate::{Engine, Module};
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeSet, HashMap};
use std::fs;
use std::mem;
use std::path::Path;
use std::ptr::NonNull;
use std::sync::Arc;
use wasmtime_environ::component::{
ComponentTypes, GlobalInitializer, LoweredIndex, RuntimeAlwaysTrapIndex,
RuntimeTranscoderIndex, StaticModuleIndex, Translator,
};
use wasmtime_environ::{EntityRef, FunctionLoc, ObjectKind, PrimaryMap, ScopeVec, SignatureIndex};
use wasmtime_jit::{CodeMemory, CompiledModuleInfo};
use wasmtime_runtime::{MmapVec, VMFunctionBody, VMTrampoline};
/// A compiled WebAssembly Component.
//
// FIXME: need to write more docs here.
#[derive(Clone)]
pub struct Component {
inner: Arc<ComponentInner>,
}
struct ComponentInner {
/// Core wasm modules that the component defined internally, indexed by the
/// compile-time-assigned `ModuleUpvarIndex`.
static_modules: PrimaryMap<StaticModuleIndex, Module>,
/// Code-related information such as the compiled artifact, type
/// information, etc.
///
/// Note that the `Arc` here is used to share this allocation with internal
/// modules.
code: Arc<CodeObject>,
/// Metadata produced during compilation.
info: CompiledComponentInfo,
}
#[derive(Serialize, Deserialize)]
struct CompiledComponentInfo {
/// Type information calculated during translation about this component.
component: wasmtime_environ::component::Component,
/// Where lowered function trampolines are located within the `text`
/// section of `code_memory`.
///
/// These trampolines are the function pointer within the
/// `VMCallerCheckedFuncRef` and will delegate indirectly to a host function
/// pointer when called.
lowerings: PrimaryMap<LoweredIndex, FunctionLoc>,
/// Where the "always trap" functions are located within the `text` section
/// of `code_memory`.
///
/// These functions are "degenerate functions" here solely to implement
/// functions that are `canon lift`'d then immediately `canon lower`'d. The
/// `u32` value here is the offset of the trap instruction from the start of
/// the function.
always_trap: PrimaryMap<RuntimeAlwaysTrapIndex, FunctionLoc>,
/// Where all the cranelift-generated transcode functions are located in the
/// compiled image of this component.
transcoders: PrimaryMap<RuntimeTranscoderIndex, FunctionLoc>,
/// Extra trampolines other than those contained in static modules
/// necessary for this component.
trampolines: Vec<(SignatureIndex, FunctionLoc)>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
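/// A minimal sketch (assumes the `component-model` and `wat` features are
/// enabled so the text format below is accepted):
///
/// ```no_run
/// use wasmtime::Engine;
/// use wasmtime::component::Component;
///
/// let engine = Engine::default();
/// let component = Component::new(&engine, "(component)").unwrap();
/// ```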
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
}
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the file referenced here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(compiler)]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut modules) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
// Compile all core wasm modules, in parallel, which will internally
// compile all their functions in parallel as well.
let module_funcs = engine.run_maybe_parallel(modules.values_mut().collect(), |module| {
Module::compile_functions(engine, module, types.module_types())
})?;
// Compile all host-to-wasm trampolines where the required set of
// trampolines is unioned from all core wasm modules plus what the
// component itself needs.
let module_trampolines = modules
.iter()
.flat_map(|(_, m)| m.exported_signatures.iter().copied())
.collect::<BTreeSet<_>>();
let trampolines = module_trampolines
.iter()
.copied()
.chain(
// All lowered functions will require a trampoline to be available in
// case they're used when entering wasm. For example a lowered function
// could be immediately lifted in which case we'll need a trampoline to
// call that lowered function.
//
// Most of the time trampolines can come from the core wasm modules
// since lifted functions come from core wasm. For these esoteric cases
// though we may have to compile trampolines specifically into the
// component object as well in case core wasm doesn't provide the
// necessary trampoline.
component.initializers.iter().filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i.canonical_abi),
GlobalInitializer::AlwaysTrap(i) => Some(i.canonical_abi),
_ => None,
}),
)
.collect::<BTreeSet<_>>();
let compiled_trampolines = engine
.run_maybe_parallel(trampolines.iter().cloned().collect(), |i| {
compiler.compile_host_to_wasm_trampoline(&types[i])
})?;
// Compile all transcoders required which adapt from a
// core-wasm-specific ABI (e.g. 32 or 64-bit) into the host transcoder
// ABI through an indirect libcall.
let transcoders = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::Transcoder(i) => Some(i),
_ => None,
})
.collect();
let transcoders = engine.run_maybe_parallel(transcoders, |info| {
compiler
.component_compiler()
.compile_transcoder(&component, info, &types)
})?;
// Compile all "always trap" functions which are small typed shims that
// exist solely to trap immediately for components.
let always_trap = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::AlwaysTrap(i) => Some(i),
_ => None,
})
.collect();
let always_trap = engine.run_maybe_parallel(always_trap, |info| {
compiler
.component_compiler()
.compile_always_trap(&types[info.canonical_abi])
})?;
// Compile all "lowerings" which are adapters that go from core wasm
// into the host which will process the canonical ABI.
let lowerings = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i),
_ => None,
})
.collect();
let lowerings = engine.run_maybe_parallel(lowerings, |lowering| {
compiler
.component_compiler()
.compile_lowered_trampoline(&component, lowering, &types)
})?;
// Collect the results of all of the function-based compilations above
// into one large list of functions to get appended into the text
// section of the final module.
let mut funcs = Vec::new();
let mut module_func_start_index = Vec::new();
let mut func_index_to_module_index = Vec::new();
let mut func_infos = Vec::new();
for (i, list) in module_funcs.into_iter().enumerate() {
module_func_start_index.push(func_index_to_module_index.len());
let mut infos = Vec::new();
for (j, (info, func)) in list.into_iter().enumerate() {
func_index_to_module_index.push(i);
let name = format!("_wasm{i}_function{j}");
funcs.push((name, func));
infos.push(info);
}
func_infos.push(infos);
}
for (sig, func) in trampolines.iter().zip(compiled_trampolines) {
let name = format!("_wasm_trampoline{}", sig.as_u32());
funcs.push((name, func));
}
let ntranscoders = transcoders.len();
for (i, func) in transcoders.into_iter().enumerate() {
let name = format!("_wasm_component_transcoder{i}");
funcs.push((name, func));
}
let nalways_trap = always_trap.len();
for (i, func) in always_trap.into_iter().enumerate() {
let name = format!("_wasm_component_always_trap{i}");
funcs.push((name, func));
}
let nlowerings = lowerings.len();
for (i, func) in lowerings.into_iter().enumerate() {
let name = format!("_wasm_component_lowering{i}");
funcs.push((name, func));
}
let mut object = compiler.object(ObjectKind::Component)?;
let locs = compiler.append_code(&mut object, &funcs, tunables, &|i, idx| {
// Map from the `i`th function which is requesting the relocation to
// the index in `modules` that the function belongs to. Using that
// metadata we can resolve `idx: FuncIndex` to a `DefinedFuncIndex`
// to the index of that module's function that's being called.
//
// Note that this will panic if `i` is a function beyond the initial
// set of core wasm module functions. That's intentional, however,
// since trampolines and other generated code should not have relocations to
// resolve.
let module_index = func_index_to_module_index[i];
let defined_index = modules[StaticModuleIndex::new(module_index)]
.module
.defined_func_index(idx)
.unwrap();
// Additionally use the module index to determine where that
// module's list of functions started, to factor that in as an offset
// as well.
let offset = module_func_start_index[module_index];
defined_index.index() + offset
})?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
// Partition the locations returned from appending to the text section,
// one per appended function, back into their respective maps.
let mut locs = locs.into_iter().map(|(_sym, loc)| loc);
let funcs = func_infos
.into_iter()
.map(|infos| {
infos
.into_iter()
.zip(&mut locs)
.collect::<PrimaryMap<_, _>>()
})
.collect::<Vec<_>>();
let signature_to_trampoline = trampolines
.iter()
.cloned()
.zip(&mut locs)
.collect::<HashMap<_, _>>();
let transcoders = locs
.by_ref()
.take(ntranscoders)
.collect::<PrimaryMap<RuntimeTranscoderIndex, _>>();
let always_trap = locs
.by_ref()
.take(nalways_trap)
.collect::<PrimaryMap<RuntimeAlwaysTrapIndex, _>>();
let lowerings = locs
.by_ref()
.take(nlowerings)
.collect::<PrimaryMap<LoweredIndex, _>>();
assert!(locs.next().is_none());
// Convert all `ModuleTranslation` instances into `CompiledModuleInfo`
// through an `ObjectBuilder` here. This is then used to create the
// final `mmap` which is the final compilation artifact.
let mut builder = wasmtime_jit::ObjectBuilder::new(object, tunables);
let mut static_modules = PrimaryMap::new();
for ((_, module), funcs) in modules.into_iter().zip(funcs) {
// Build the list of trampolines for this module from its set of
// exported signatures, which is the list of expected trampolines,
// from the set of trampolines that were compiled for everything
// within this component.
let trampolines = module
.exported_signatures
.iter()
.map(|sig| (*sig, signature_to_trampoline[sig]))
.collect();
let info = builder.append(module, funcs, trampolines)?;
static_modules.push(info);
}
let info = CompiledComponentInfo {
always_trap,
component,
lowerings,
trampolines: trampolines
.difference(&module_trampolines)
.map(|i| (*i, signature_to_trampoline[i]))
.collect(),
transcoders,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules,
};
builder.serialize_info(&artifacts);
let mmap = builder.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures = SignatureCollection::new_for_module(
engine.signatures(),
types.module_types(),
static_modules
.iter()
.flat_map(|(_, m)| m.trampolines.iter().copied())
.chain(info.trampolines.iter().copied())
.map(|(sig, loc)| {
let trampoline = code_memory.text()[loc.start as usize..].as_ptr();
(sig, unsafe {
mem::transmute::<*const u8, VMTrampoline>(trampoline)
})
}),
);
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> |
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn lowering_ptr(&self, index: LoweredIndex) -> NonNull<VMFunctionBody> {
let info = &self.inner.info.lowerings[index];
self.func(info)
}
pub(crate) fn always_trap_ptr(&self, index: RuntimeAlwaysTrapIndex) -> NonNull<VMFunctionBody> {
let loc = &self.inner.info.always_trap[index];
self.func(loc)
}
pub(crate) fn transcoder_ptr(&self, index: RuntimeTranscoderIndex) -> NonNull<VMFunctionBody> {
let info = &self.inner.info.transcoders[index];
self.func(info)
}
fn func(&self, loc: &FunctionLoc) -> NonNull<VMFunctionBody> {
let text = self.text();
let trampoline = &text[loc.start as usize..][..loc.length as usize];
NonNull::new(trampoline.as_ptr() as *mut VMFunctionBody).unwrap()
}
pub(crate) fn code_object(&self) -> &Arc<CodeObject> {
&self.inner.code
}
/// Same as [`Module::serialize`], except for a component.
///
/// Note that the artifact produced here must be passed to
/// [`Component::deserialize`] and is not compatible for use with
/// [`Module`].
///
/// [`Module::serialize`]: crate::Module::serialize
/// [`Module`]: crate::Module
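///
/// # Example
///
/// A rough round-trip sketch, assuming the `wat` feature so the component
/// can be built from its text form:
///
/// ```ignore
/// use wasmtime::{component::Component, Engine};
///
/// let engine = Engine::default();
/// let component = Component::new(&engine, "(component)")?;
/// let bytes = component.serialize()?;
/// // Later, with a compatibly-configured `Engine`:
/// let _restored = unsafe { Component::deserialize(&engine, &bytes)? };
/// # Ok::<(), anyhow::Error>(())
/// ```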
pub fn serialize(&self) -> Result<Vec<u8>> {
Ok(self.code_object().code_memory().mmap().to_vec())
}
}
| {
match self.inner.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
} | identifier_body |
component.rs | use crate::code::CodeObject;
use crate::signatures::SignatureCollection;
use crate::{Engine, Module};
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeSet, HashMap};
use std::fs;
use std::mem;
use std::path::Path;
use std::ptr::NonNull;
use std::sync::Arc;
use wasmtime_environ::component::{
ComponentTypes, GlobalInitializer, LoweredIndex, RuntimeAlwaysTrapIndex,
RuntimeTranscoderIndex, StaticModuleIndex, Translator,
};
use wasmtime_environ::{EntityRef, FunctionLoc, ObjectKind, PrimaryMap, ScopeVec, SignatureIndex};
use wasmtime_jit::{CodeMemory, CompiledModuleInfo};
use wasmtime_runtime::{MmapVec, VMFunctionBody, VMTrampoline};
/// A compiled WebAssembly Component.
//
// FIXME: need to write more docs here.
#[derive(Clone)]
pub struct Component {
inner: Arc<ComponentInner>,
}
struct ComponentInner {
/// Core wasm modules that the component defined internally, indexed by the
/// compile-time-assigned `ModuleUpvarIndex`.
static_modules: PrimaryMap<StaticModuleIndex, Module>,
/// Code-related information such as the compiled artifact, type
/// information, etc.
///
/// Note that the `Arc` here is used to share this allocation with internal
/// modules.
code: Arc<CodeObject>,
/// Metadata produced during compilation.
info: CompiledComponentInfo,
}
#[derive(Serialize, Deserialize)]
struct CompiledComponentInfo {
/// Type information calculated during translation about this component.
component: wasmtime_environ::component::Component,
/// Where lowered function trampolines are located within the `text`
/// section of `code_memory`.
///
/// These trampolines are the function pointer within the
/// `VMCallerCheckedFuncRef` and will delegate indirectly to a host function
/// pointer when called.
lowerings: PrimaryMap<LoweredIndex, FunctionLoc>,
/// Where the "always trap" functions are located within the `text` section
/// of `code_memory`.
///
/// These functions are "degenerate functions" here solely to implement
/// functions that are `canon lift`'d then immediately `canon lower`'d. The
/// `u32` value here is the offset of the trap instruction from the start of
/// the function.
always_trap: PrimaryMap<RuntimeAlwaysTrapIndex, FunctionLoc>,
/// Where all the cranelift-generated transcode functions are located in the
/// compiled image of this component.
transcoders: PrimaryMap<RuntimeTranscoderIndex, FunctionLoc>,
/// Extra trampolines other than those contained in static modules
/// necessary for this component.
trampolines: Vec<(SignatureIndex, FunctionLoc)>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
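///
/// # Example
///
/// A minimal sketch; the path is illustrative and, with the `wat` feature
/// enabled, may point at either the text or binary format:
///
/// ```ignore
/// use wasmtime::{component::Component, Engine};
///
/// let engine = Engine::default();
/// let component = Component::from_file(&engine, "my_component.wat")?;
/// # Ok::<(), anyhow::Error>(())
/// ```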
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
}
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the bytes passed here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(compiler)]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut modules) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
// Compile all core wasm modules, in parallel, which will internally
// compile all their functions in parallel as well.
let module_funcs = engine.run_maybe_parallel(modules.values_mut().collect(), |module| {
Module::compile_functions(engine, module, types.module_types())
})?;
// Compile all host-to-wasm trampolines where the required set of
// trampolines is unioned from all core wasm modules plus what the
// component itself needs.
let module_trampolines = modules
.iter()
.flat_map(|(_, m)| m.exported_signatures.iter().copied())
.collect::<BTreeSet<_>>();
let trampolines = module_trampolines
.iter()
.copied()
.chain(
// All lowered functions will require a trampoline to be available in
// case they're used when entering wasm. For example a lowered function
// could be immediately lifted in which case we'll need a trampoline to
// call that lowered function.
//
// Most of the time trampolines can come from the core wasm modules
// since lifted functions come from core wasm. For these esoteric cases
// though we may have to compile trampolines specifically into the
// component object as well in case core wasm doesn't provide the
// necessary trampoline.
component.initializers.iter().filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i.canonical_abi),
GlobalInitializer::AlwaysTrap(i) => Some(i.canonical_abi),
_ => None,
}),
)
.collect::<BTreeSet<_>>();
let compiled_trampolines = engine
.run_maybe_parallel(trampolines.iter().cloned().collect(), |i| {
compiler.compile_host_to_wasm_trampoline(&types[i])
})?;
// Compile all transcoders required which adapt from a
// core-wasm-specific ABI (e.g. 32 or 64-bit) into the host transcoder
// ABI through an indirect libcall.
let transcoders = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::Transcoder(i) => Some(i),
_ => None,
})
.collect();
let transcoders = engine.run_maybe_parallel(transcoders, |info| {
compiler
.component_compiler()
.compile_transcoder(&component, info, &types)
})?;
// Compile all "always trap" functions which are small typed shims that
// exist solely to trap immediately for components.
let always_trap = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::AlwaysTrap(i) => Some(i),
_ => None,
})
.collect();
let always_trap = engine.run_maybe_parallel(always_trap, |info| {
compiler
.component_compiler()
.compile_always_trap(&types[info.canonical_abi])
})?;
// Compile all "lowerings" which are adapters that go from core wasm
// into the host which will process the canonical ABI.
let lowerings = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i),
_ => None,
})
.collect();
let lowerings = engine.run_maybe_parallel(lowerings, |lowering| {
compiler
.component_compiler()
.compile_lowered_trampoline(&component, lowering, &types)
})?;
// Collect the results of all of the function-based compilations above
// into one large list of functions to get appended into the text
// section of the final module.
let mut funcs = Vec::new();
let mut module_func_start_index = Vec::new();
let mut func_index_to_module_index = Vec::new();
let mut func_infos = Vec::new();
for (i, list) in module_funcs.into_iter().enumerate() {
module_func_start_index.push(func_index_to_module_index.len());
let mut infos = Vec::new();
for (j, (info, func)) in list.into_iter().enumerate() {
func_index_to_module_index.push(i);
let name = format!("_wasm{i}_function{j}");
funcs.push((name, func));
infos.push(info);
}
func_infos.push(infos);
}
for (sig, func) in trampolines.iter().zip(compiled_trampolines) {
let name = format!("_wasm_trampoline{}", sig.as_u32());
funcs.push((name, func));
}
let ntranscoders = transcoders.len();
for (i, func) in transcoders.into_iter().enumerate() {
let name = format!("_wasm_component_transcoder{i}");
funcs.push((name, func));
}
let nalways_trap = always_trap.len();
for (i, func) in always_trap.into_iter().enumerate() {
let name = format!("_wasm_component_always_trap{i}");
funcs.push((name, func));
}
let nlowerings = lowerings.len();
for (i, func) in lowerings.into_iter().enumerate() {
let name = format!("_wasm_component_lowering{i}");
funcs.push((name, func));
}
let mut object = compiler.object(ObjectKind::Component)?;
let locs = compiler.append_code(&mut object, &funcs, tunables, &|i, idx| {
// Map from the `i`th function which is requesting the relocation to
// the index in `modules` that the function belongs to. Using that
// metadata we can resolve `idx: FuncIndex` to a `DefinedFuncIndex`
// to the index of that module's function that's being called.
//
// Note that this will panic if `i` is a function beyond the initial
// set of core wasm module functions. That's intentional, however,
// since trampolines and other generated code should not have relocations to
// resolve.
let module_index = func_index_to_module_index[i];
let defined_index = modules[StaticModuleIndex::new(module_index)]
.module
.defined_func_index(idx)
.unwrap();
// Additionally use the module index to determine where that
// module's list of functions started, to factor that in as an offset
// as well.
let offset = module_func_start_index[module_index];
defined_index.index() + offset
})?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
// Partition the locations returned from appending to the text section,
// one per appended function, back into their respective maps.
let mut locs = locs.into_iter().map(|(_sym, loc)| loc);
let funcs = func_infos
.into_iter()
.map(|infos| {
infos
.into_iter()
.zip(&mut locs)
.collect::<PrimaryMap<_, _>>()
})
.collect::<Vec<_>>();
let signature_to_trampoline = trampolines
.iter()
.cloned()
.zip(&mut locs)
.collect::<HashMap<_, _>>();
let transcoders = locs
.by_ref()
.take(ntranscoders)
.collect::<PrimaryMap<RuntimeTranscoderIndex, _>>();
let always_trap = locs
.by_ref()
.take(nalways_trap)
.collect::<PrimaryMap<RuntimeAlwaysTrapIndex, _>>();
let lowerings = locs
.by_ref()
.take(nlowerings)
.collect::<PrimaryMap<LoweredIndex, _>>();
assert!(locs.next().is_none());
// Convert all `ModuleTranslation` instances into `CompiledModuleInfo`
// through an `ObjectBuilder` here. This is then used to create the
// final `mmap` which is the final compilation artifact.
let mut builder = wasmtime_jit::ObjectBuilder::new(object, tunables);
let mut static_modules = PrimaryMap::new();
for ((_, module), funcs) in modules.into_iter().zip(funcs) {
// Build the list of trampolines for this module from its set of
// exported signatures, which is the list of expected trampolines,
// from the set of trampolines that were compiled for everything
// within this component.
let trampolines = module
.exported_signatures
.iter()
.map(|sig| (*sig, signature_to_trampoline[sig]))
.collect();
let info = builder.append(module, funcs, trampolines)?;
static_modules.push(info);
}
let info = CompiledComponentInfo {
always_trap,
component,
lowerings,
trampolines: trampolines
.difference(&module_trampolines)
.map(|i| (*i, signature_to_trampoline[i]))
.collect(),
transcoders,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules,
};
builder.serialize_info(&artifacts);
let mmap = builder.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures = SignatureCollection::new_for_module(
engine.signatures(),
types.module_types(),
static_modules
.iter()
.flat_map(|(_, m)| m.trampolines.iter().copied())
.chain(info.trampolines.iter().copied())
.map(|(sig, loc)| {
let trampoline = code_memory.text()[loc.start as usize..].as_ptr();
(sig, unsafe {
mem::transmute::<*const u8, VMTrampoline>(trampoline)
})
}),
);
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
match self.inner.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn lowering_ptr(&self, index: LoweredIndex) -> NonNull<VMFunctionBody> { | let loc = &self.inner.info.always_trap[index];
self.func(loc)
}
pub(crate) fn transcoder_ptr(&self, index: RuntimeTranscoderIndex) -> NonNull<VMFunctionBody> {
let info = &self.inner.info.transcoders[index];
self.func(info)
}
fn func(&self, loc: &FunctionLoc) -> NonNull<VMFunctionBody> {
let text = self.text();
let trampoline = &text[loc.start as usize..][..loc.length as usize];
NonNull::new(trampoline.as_ptr() as *mut VMFunctionBody).unwrap()
}
pub(crate) fn code_object(&self) -> &Arc<CodeObject> {
&self.inner.code
}
/// Same as [`Module::serialize`], except for a component.
///
/// Note that the artifact produced here must be passed to
/// [`Component::deserialize`] and is not compatible for use with
/// [`Module`].
///
/// [`Module::serialize`]: crate::Module::serialize
/// [`Module`]: crate::Module
pub fn serialize(&self) -> Result<Vec<u8>> {
Ok(self.code_object().code_memory().mmap().to_vec())
}
} | let info = &self.inner.info.lowerings[index];
self.func(info)
}
pub(crate) fn always_trap_ptr(&self, index: RuntimeAlwaysTrapIndex) -> NonNull<VMFunctionBody> { | random_line_split |
component.rs | use crate::code::CodeObject;
use crate::signatures::SignatureCollection;
use crate::{Engine, Module};
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeSet, HashMap};
use std::fs;
use std::mem;
use std::path::Path;
use std::ptr::NonNull;
use std::sync::Arc;
use wasmtime_environ::component::{
ComponentTypes, GlobalInitializer, LoweredIndex, RuntimeAlwaysTrapIndex,
RuntimeTranscoderIndex, StaticModuleIndex, Translator,
};
use wasmtime_environ::{EntityRef, FunctionLoc, ObjectKind, PrimaryMap, ScopeVec, SignatureIndex};
use wasmtime_jit::{CodeMemory, CompiledModuleInfo};
use wasmtime_runtime::{MmapVec, VMFunctionBody, VMTrampoline};
/// A compiled WebAssembly Component.
//
// FIXME: need to write more docs here.
#[derive(Clone)]
pub struct Component {
inner: Arc<ComponentInner>,
}
struct ComponentInner {
/// Core wasm modules that the component defined internally, indexed by the
/// compile-time-assigned `ModuleUpvarIndex`.
static_modules: PrimaryMap<StaticModuleIndex, Module>,
/// Code-related information such as the compiled artifact, type
/// information, etc.
///
/// Note that the `Arc` here is used to share this allocation with internal
/// modules.
code: Arc<CodeObject>,
/// Metadata produced during compilation.
info: CompiledComponentInfo,
}
#[derive(Serialize, Deserialize)]
struct CompiledComponentInfo {
/// Type information calculated during translation about this component.
component: wasmtime_environ::component::Component,
/// Where lowered function trampolines are located within the `text`
/// section of `code_memory`.
///
/// These trampolines are the function pointer within the
/// `VMCallerCheckedFuncRef` and will delegate indirectly to a host function
/// pointer when called.
lowerings: PrimaryMap<LoweredIndex, FunctionLoc>,
/// Where the "always trap" functions are located within the `text` section
/// of `code_memory`.
///
/// These functions are "degenerate functions" here solely to implement
/// functions that are `canon lift`'d then immediately `canon lower`'d. The
/// `u32` value here is the offset of the trap instruction from the start of
/// the function.
always_trap: PrimaryMap<RuntimeAlwaysTrapIndex, FunctionLoc>,
/// Where all the cranelift-generated transcode functions are located in the
/// compiled image of this component.
transcoders: PrimaryMap<RuntimeTranscoderIndex, FunctionLoc>,
/// Extra trampolines other than those contained in static modules
/// necessary for this component.
trampolines: Vec<(SignatureIndex, FunctionLoc)>,
}
#[derive(Serialize, Deserialize)]
pub(crate) struct ComponentArtifacts {
info: CompiledComponentInfo,
types: ComponentTypes,
static_modules: PrimaryMap<StaticModuleIndex, CompiledModuleInfo>,
}
impl Component {
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let bytes = bytes.as_ref();
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Component::from_binary(engine, &bytes)
}
/// Compiles a new WebAssembly component from a wasm file on disk pointed to
/// by `file`.
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn from_file(engine: &Engine, file: impl AsRef<Path>) -> Result<Component> {
match Self::new(
engine,
&fs::read(&file).with_context(|| "failed to read input file")?,
) {
Ok(m) => Ok(m),
Err(e) => {
cfg_if::cfg_if! {
if #[cfg(feature = "wat")] {
let mut e = e.downcast::<wat::Error>()?;
e.set_path(file);
bail!(e)
} else {
Err(e)
}
}
}
}
}
/// Compiles a new WebAssembly component from the in-memory wasm image
/// provided.
//
// FIXME: need to write more docs here.
#[cfg(compiler)]
#[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Component> {
engine
.check_compatible_with_native_host()
.context("compilation settings are not compatible with the native host")?;
let (mmap, artifacts) = Component::build_artifacts(engine, binary)?;
let mut code_memory = CodeMemory::new(mmap)?;
code_memory.publish()?;
Component::from_parts(engine, Arc::new(code_memory), Some(artifacts))
}
/// Same as [`Module::deserialize`], but for components.
///
/// Note that the bytes passed here must contain contents previously
/// produced by [`Engine::precompile_component`] or
/// [`Component::serialize`].
///
/// For more information see the [`Module::deserialize`] method.
///
/// [`Module::deserialize`]: crate::Module::deserialize
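///
/// # Example
///
/// A minimal sketch, assuming `bytes` was previously produced by
/// [`Component::serialize`]:
///
/// ```ignore
/// use wasmtime::{component::Component, Engine};
///
/// let engine = Engine::default();
/// # let bytes: Vec<u8> = Vec::new();
/// // SAFETY: only deserialize artifacts from a trusted source.
/// let component = unsafe { Component::deserialize(&engine, &bytes)? };
/// # Ok::<(), anyhow::Error>(())
/// ```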
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Component> {
let code = engine.load_code_bytes(bytes.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Same as [`Module::deserialize_file`], but for components.
///
/// For more information see the [`Component::deserialize`] and
/// [`Module::deserialize_file`] methods.
///
/// [`Module::deserialize_file`]: crate::Module::deserialize_file
pub unsafe fn deserialize_file(engine: &Engine, path: impl AsRef<Path>) -> Result<Component> {
let code = engine.load_code_file(path.as_ref(), ObjectKind::Component)?;
Component::from_parts(engine, code, None)
}
/// Performs the compilation phase for a component, translating and
/// validating the provided wasm binary to machine code.
///
/// This method will compile all nested core wasm binaries in addition to
/// any necessary extra functions required for operation with components.
/// The output artifact here is the serialized object file contained within
/// an owned mmap along with metadata about the compilation itself.
#[cfg(compiler)]
pub(crate) fn build_artifacts(
engine: &Engine,
binary: &[u8],
) -> Result<(MmapVec, ComponentArtifacts)> {
let tunables = &engine.config().tunables;
let compiler = engine.compiler();
let scope = ScopeVec::new();
let mut validator =
wasmparser::Validator::new_with_features(engine.config().features.clone());
let mut types = Default::default();
let (component, mut modules) =
Translator::new(tunables, &mut validator, &mut types, &scope)
.translate(binary)
.context("failed to parse WebAssembly module")?;
let types = types.finish();
// Compile all core wasm modules, in parallel, which will internally
// compile all their functions in parallel as well.
let module_funcs = engine.run_maybe_parallel(modules.values_mut().collect(), |module| {
Module::compile_functions(engine, module, types.module_types())
})?;
// Compile all host-to-wasm trampolines where the required set of
// trampolines is unioned from all core wasm modules plus what the
// component itself needs.
let module_trampolines = modules
.iter()
.flat_map(|(_, m)| m.exported_signatures.iter().copied())
.collect::<BTreeSet<_>>();
let trampolines = module_trampolines
.iter()
.copied()
.chain(
// All lowered functions will require a trampoline to be available in
// case they're used when entering wasm. For example a lowered function
// could be immediately lifted in which case we'll need a trampoline to
// call that lowered function.
//
// Most of the time trampolines can come from the core wasm modules
// since lifted functions come from core wasm. For these esoteric cases
// though we may have to compile trampolines specifically into the
// component object as well in case core wasm doesn't provide the
// necessary trampoline.
component.initializers.iter().filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i.canonical_abi),
GlobalInitializer::AlwaysTrap(i) => Some(i.canonical_abi),
_ => None,
}),
)
.collect::<BTreeSet<_>>();
let compiled_trampolines = engine
.run_maybe_parallel(trampolines.iter().cloned().collect(), |i| {
compiler.compile_host_to_wasm_trampoline(&types[i])
})?;
// Compile all transcoders required which adapt from a
// core-wasm-specific ABI (e.g. 32 or 64-bit) into the host transcoder
// ABI through an indirect libcall.
let transcoders = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::Transcoder(i) => Some(i),
_ => None,
})
.collect();
let transcoders = engine.run_maybe_parallel(transcoders, |info| {
compiler
.component_compiler()
.compile_transcoder(&component, info, &types)
})?;
// Compile all "always trap" functions which are small typed shims that
// exist solely to trap immediately for components.
let always_trap = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::AlwaysTrap(i) => Some(i),
_ => None,
})
.collect();
let always_trap = engine.run_maybe_parallel(always_trap, |info| {
compiler
.component_compiler()
.compile_always_trap(&types[info.canonical_abi])
})?;
// Compile all "lowerings" which are adapters that go from core wasm
// into the host which will process the canonical ABI.
let lowerings = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i),
_ => None,
})
.collect();
let lowerings = engine.run_maybe_parallel(lowerings, |lowering| {
compiler
.component_compiler()
.compile_lowered_trampoline(&component, lowering, &types)
})?;
// Collect the results of all of the function-based compilations above
// into one large list of functions to get appended into the text
// section of the final module.
let mut funcs = Vec::new();
let mut module_func_start_index = Vec::new();
let mut func_index_to_module_index = Vec::new();
let mut func_infos = Vec::new();
for (i, list) in module_funcs.into_iter().enumerate() {
module_func_start_index.push(func_index_to_module_index.len());
let mut infos = Vec::new();
for (j, (info, func)) in list.into_iter().enumerate() {
func_index_to_module_index.push(i);
let name = format!("_wasm{i}_function{j}");
funcs.push((name, func));
infos.push(info);
}
func_infos.push(infos);
}
for (sig, func) in trampolines.iter().zip(compiled_trampolines) {
let name = format!("_wasm_trampoline{}", sig.as_u32());
funcs.push((name, func));
}
let ntranscoders = transcoders.len();
for (i, func) in transcoders.into_iter().enumerate() {
let name = format!("_wasm_component_transcoder{i}");
funcs.push((name, func));
}
let nalways_trap = always_trap.len();
for (i, func) in always_trap.into_iter().enumerate() {
let name = format!("_wasm_component_always_trap{i}");
funcs.push((name, func));
}
let nlowerings = lowerings.len();
for (i, func) in lowerings.into_iter().enumerate() {
let name = format!("_wasm_component_lowering{i}");
funcs.push((name, func));
}
let mut object = compiler.object(ObjectKind::Component)?;
let locs = compiler.append_code(&mut object, &funcs, tunables, &|i, idx| {
// Map from the `i`th function which is requesting the relocation to
// the index in `modules` that the function belongs to. Using that
// metadata we can resolve `idx: FuncIndex` to a `DefinedFuncIndex`
// to the index of that module's function that's being called.
//
// Note that this will panic if `i` is a function beyond the initial
// set of core wasm module functions. That's intentional, however,
// since trampolines and other generated code should not have relocations to
// resolve.
let module_index = func_index_to_module_index[i];
let defined_index = modules[StaticModuleIndex::new(module_index)]
.module
.defined_func_index(idx)
.unwrap();
// Additionally use the module index to determine where that
// module's list of functions started, to factor that in as an offset
// as well.
let offset = module_func_start_index[module_index];
defined_index.index() + offset
})?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
// Partition the locations returned from appending to the text section,
// one per appended function, back into their respective maps.
let mut locs = locs.into_iter().map(|(_sym, loc)| loc);
let funcs = func_infos
.into_iter()
.map(|infos| {
infos
.into_iter()
.zip(&mut locs)
.collect::<PrimaryMap<_, _>>()
})
.collect::<Vec<_>>();
let signature_to_trampoline = trampolines
.iter()
.cloned()
.zip(&mut locs)
.collect::<HashMap<_, _>>();
let transcoders = locs
.by_ref()
.take(ntranscoders)
.collect::<PrimaryMap<RuntimeTranscoderIndex, _>>();
let always_trap = locs
.by_ref()
.take(nalways_trap)
.collect::<PrimaryMap<RuntimeAlwaysTrapIndex, _>>();
let lowerings = locs
.by_ref()
.take(nlowerings)
.collect::<PrimaryMap<LoweredIndex, _>>();
assert!(locs.next().is_none());
// Convert all `ModuleTranslation` instances into `CompiledModuleInfo`
// through an `ObjectBuilder` here. This is then used to create the
// final `mmap` which is the final compilation artifact.
let mut builder = wasmtime_jit::ObjectBuilder::new(object, tunables);
let mut static_modules = PrimaryMap::new();
for ((_, module), funcs) in modules.into_iter().zip(funcs) {
// Build the list of trampolines for this module from its set of
// exported signatures, which is the list of expected trampolines,
// from the set of trampolines that were compiled for everything
// within this component.
let trampolines = module
.exported_signatures
.iter()
.map(|sig| (*sig, signature_to_trampoline[sig]))
.collect();
let info = builder.append(module, funcs, trampolines)?;
static_modules.push(info);
}
let info = CompiledComponentInfo {
always_trap,
component,
lowerings,
trampolines: trampolines
.difference(&module_trampolines)
.map(|i| (*i, signature_to_trampoline[i]))
.collect(),
transcoders,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules,
};
builder.serialize_info(&artifacts);
let mmap = builder.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures = SignatureCollection::new_for_module(
engine.signatures(),
types.module_types(),
static_modules
.iter()
.flat_map(|(_, m)| m.trampolines.iter().copied())
.chain(info.trampolines.iter().copied())
.map(|(sig, loc)| {
let trampoline = code_memory.text()[loc.start as usize..].as_ptr();
(sig, unsafe {
mem::transmute::<*const u8, VMTrampoline>(trampoline)
})
}),
);
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
match self.inner.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn | (&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn lowering_ptr(&self, index: LoweredIndex) -> NonNull<VMFunctionBody> {
let info = &self.inner.info.lowerings[index];
self.func(info)
}
pub(crate) fn always_trap_ptr(&self, index: RuntimeAlwaysTrapIndex) -> NonNull<VMFunctionBody> {
let loc = &self.inner.info.always_trap[index];
self.func(loc)
}
pub(crate) fn transcoder_ptr(&self, index: RuntimeTranscoderIndex) -> NonNull<VMFunctionBody> {
let info = &self.inner.info.transcoders[index];
self.func(info)
}
fn func(&self, loc: &FunctionLoc) -> NonNull<VMFunctionBody> {
let text = self.text();
let trampoline = &text[loc.start as usize..][..loc.length as usize];
NonNull::new(trampoline.as_ptr() as *mut VMFunctionBody).unwrap()
}
pub(crate) fn code_object(&self) -> &Arc<CodeObject> {
&self.inner.code
}
/// Same as [`Module::serialize`], except for a component.
///
/// Note that the artifact produced here must be passed to
/// [`Component::deserialize`] and is not compatible for use with
/// [`Module`].
///
/// [`Module::serialize`]: crate::Module::serialize
/// [`Module`]: crate::Module
pub fn serialize(&self) -> Result<Vec<u8>> {
Ok(self.code_object().code_memory().mmap().to_vec())
}
}
| text | identifier_name |
images.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package release
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/release-sdk/sign"
"sigs.k8s.io/release-utils/command"
)
// Images is a wrapper around container image related functionality.
type Images struct {
imageImpl
signer *sign.Signer
}
// NewImages creates a new Images instance
func NewImages() *Images {
return &Images{
imageImpl: &defaultImageImpl{},
signer: sign.New(sign.Default()),
}
}
// SetImpl can be used to set the internal image implementation.
func (i *Images) SetImpl(impl imageImpl) {
i.imageImpl = impl
}
// imageImpl is a client for working with container images.
//
//counterfeiter:generate . imageImpl
type imageImpl interface {
Execute(cmd string, args ...string) error
ExecuteOutput(cmd string, args ...string) (string, error)
RepoTagFromTarball(path string) (string, error)
SignImage(*sign.Signer, string) error
VerifyImage(*sign.Signer, string) error
}
type defaultImageImpl struct{}
func (*defaultImageImpl) Execute(cmd string, args ...string) error {
return command.New(cmd, args...).RunSilentSuccess()
}
func (*defaultImageImpl) ExecuteOutput(cmd string, args ...string) (string, error) {
res, err := command.New(cmd, args...).RunSilentSuccessOutput()
if err != nil {
return "", err
}
return res.OutputTrimNL(), nil
}
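// RepoTagFromTarball extracts the first repo tag from a "docker save"
// style tarball's manifest.json, which looks roughly like the following
// (illustrative values):
//
//	[{"Config":"...","RepoTags":["registry.k8s.io/kube-apiserver-amd64:v1.27.0"],"Layers":["..."]}]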
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return tagOutput.OutputTrimNL(), nil
}
func (*defaultImageImpl) SignImage(signer *sign.Signer, reference string) error {
_, err := signer.SignImage(reference)
return err
}
func (*defaultImageImpl) VerifyImage(_ *sign.Signer, _ string) error {
// TODO: bypassing this for now due to the failure in the promotion process
// that signs the images. We will release the Feb/2023 patch releases without
// full signatures, but we will sign those in the near future in a detached
// process. Revert this change when the patches are out.
// _, err := signer.VerifyImage(reference)
// return err
return nil
}
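// tagRegex extracts the image name, i.e. the final path segment before the
// tag. For example (illustrative reference), "registry.k8s.io/kube-apiserver-amd64:v1.27.0"
// yields the submatch "kube-apiserver-amd64".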
var tagRegex = regexp.MustCompile(`^.+/(.+):.+$`)
// Publish releases container images to the provided target registry.
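//
// A minimal usage sketch (registry, version, and build path are illustrative):
//
//	imgs := release.NewImages()
//	if err := imgs.Publish("gcr.io/k8s-staging-kubernetes", "v1.27.0", "/tmp/release-build"); err != nil {
//		// handle the error
//	}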
func (i *Images) Publish(registry, version, buildPath string) error {
version = i.normalizeVersion(version)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof(
"Pushing container images from %s to registry %s",
releaseImagesPath, registry,
)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(path, origTag, newTagWithArch string) error {
if err := i.Execute(
"docker", "load", "-qi", path,
); err != nil {
return fmt.Errorf("load container image: %w", err)
}
if err := i.Execute(
"docker", "tag", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("tag container image: %w", err)
}
logrus.Infof("Pushing %s", newTagWithArch)
if err := i.Execute(
"gcloud", "docker", "--", "push", newTagWithArch,
); err != nil {
return fmt.Errorf("push container image: %w", err)
}
if err := i.SignImage(i.signer, newTagWithArch); err != nil {
return fmt.Errorf("sign container image: %w", err)
}
if err := i.Execute(
"docker", "rmi", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("remove local container image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
if err := os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled"); err != nil {
return fmt.Errorf("enable docker experimental CLI: %w", err)
}
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
logrus.Infof("Creating manifest image %s", imageVersion)
manifests := []string{}
for _, arch := range arches {
manifests = append(manifests,
fmt.Sprintf("%s-%s:%s", image, arch, version),
)
}
if err := i.Execute("docker", append(
[]string{"manifest", "create", "--amend", imageVersion},
manifests...,
)...); err != nil {
return fmt.Errorf("create manifest: %w", err)
}
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil {
return true, nil
} else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
// Validate checks that image manifests have been pushed to a specified remote
// registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check than Validate, as it doesn't presuppose the
// existence of a local build directory. Used in CI builds to quickly validate
// if a build is actually required.
func (i *Images) Exists(registry, version string, fast bool) (bool, error) {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry, image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return false, fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return false, fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return false, fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return false, fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return false, fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return false, fmt.Errorf("remove manifest file: %w", err)
}
}
return true, nil
}
// GetManifestImages can be used to retrieve the map of built images and
// architectures.
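//
// A minimal sketch that only collects the image-to-architectures map by
// passing a nil callback (arguments are illustrative):
//
//	images, err := release.NewImages().GetManifestImages(
//		"gcr.io/k8s-staging-kubernetes", "v1.27.0", "/tmp/release-build", nil,
//	)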
func (i *Images) GetManifestImages(
registry, version, buildPath string,
forTarballFn func(path, origTag, newTagWithArch string) error,
) (map[string][]string, error) {
manifestImages := make(map[string][]string)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof("Getting manifest images in %s", releaseImagesPath)
archPaths, err := os.ReadDir(releaseImagesPath)
if err != nil {
return nil, fmt.Errorf("read images path %s: %w", releaseImagesPath, err)
}
for _, archPath := range archPaths {
arch := archPath.Name()
if !archPath.IsDir() {
logrus.Infof("Skipping %s because it's not a directory", arch)
continue
}
if err := filepath.Walk(
filepath.Join(releaseImagesPath, arch),
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
fileName := info.Name()
if !strings.HasSuffix(fileName, ".tar") {
logrus.Infof("Skipping non-tarball %s", fileName)
return nil
}
origTag, err := i.RepoTagFromTarball(path)
if err != nil {
return fmt.Errorf("getting repo tags for tarball: %w", err)
}
tagMatches := tagRegex.FindStringSubmatch(origTag)
if len(tagMatches) != 2 {
return fmt.Errorf(
"malformed tag %s in %s", origTag, path,
)
}
binary := tagMatches[1]
newTag := filepath.Join(
registry,
strings.TrimSuffix(binary, "-"+arch),
)
newTagWithArch := fmt.Sprintf("%s-%s:%s", newTag, arch, version)
manifestImages[newTag] = append(manifestImages[newTag], arch)
if forTarballFn != nil {
if err := forTarballFn(
path, origTag, newTagWithArch, | }
}
return nil
},
); err != nil {
return nil, fmt.Errorf("traversing path: %w", err)
}
}
return manifestImages, nil
}
// normalizeVersion normalizes a container image version by replacing all invalid characters.
func (i *Images) normalizeVersion(version string) string {
return strings.ReplaceAll(version, "+", "_")
} | ); err != nil {
return fmt.Errorf("executing tarball callback: %w", err) | random_line_split |
images.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package release
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/release-sdk/sign"
"sigs.k8s.io/release-utils/command"
)
// Images is a wrapper around container image related functionality.
type Images struct {
imageImpl
signer *sign.Signer
}
// NewImages creates a new Images instance
func NewImages() *Images {
return &Images{
imageImpl: &defaultImageImpl{},
signer: sign.New(sign.Default()),
}
}
// SetImpl can be used to set the internal image implementation.
func (i *Images) SetImpl(impl imageImpl) {
i.imageImpl = impl
}
// imageImpl is a client for working with container images.
//
//counterfeiter:generate . imageImpl
type imageImpl interface {
Execute(cmd string, args ...string) error
ExecuteOutput(cmd string, args ...string) (string, error)
RepoTagFromTarball(path string) (string, error)
SignImage(*sign.Signer, string) error
VerifyImage(*sign.Signer, string) error
}
type defaultImageImpl struct{}
func (*defaultImageImpl) Execute(cmd string, args ...string) error {
return command.New(cmd, args...).RunSilentSuccess()
}
func (*defaultImageImpl) ExecuteOutput(cmd string, args ...string) (string, error) {
res, err := command.New(cmd, args...).RunSilentSuccessOutput()
if err != nil {
return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return tagOutput.OutputTrimNL(), nil
}
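// Illustrative sketch, not part of the original file: a docker-saved image tarball
// contains a manifest.json similar to the hypothetical snippet below, so the
// tar | jq pipeline above prints the first repo tag of the first image entry.
//
//	[
//	  {
//	    "Config": "...",
//	    "RepoTags": ["k8s.gcr.io/kube-proxy-arm64:v1.27.0"],
//	    "Layers": ["..."]
//	  }
//	]
//
// For that content, RepoTagFromTarball returns "k8s.gcr.io/kube-proxy-arm64:v1.27.0".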
func (*defaultImageImpl) SignImage(signer *sign.Signer, reference string) error {
_, err := signer.SignImage(reference)
return err
}
func (*defaultImageImpl) VerifyImage(_ *sign.Signer, _ string) error {
// TODO: bypassing this for now due to a failure in the promotion process
// that signs the images. We will release the Feb/2023 patch releases without full
// signatures, but we will sign them in the near future in a detached process.
// Revert this change when the patches are out.
// _, err := signer.VerifyImage(reference)
// return err
return nil
}
var tagRegex = regexp.MustCompile(`^.+/(.+):.+$`)
// Publish releases container images to the provided target registry.
func (i *Images) Publish(registry, version, buildPath string) error {
version = i.normalizeVersion(version)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof(
"Pushing container images from %s to registry %s",
releaseImagesPath, registry,
)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(path, origTag, newTagWithArch string) error {
if err := i.Execute(
"docker", "load", "-qi", path,
); err != nil {
return fmt.Errorf("load container image: %w", err)
}
if err := i.Execute(
"docker", "tag", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("tag container image: %w", err)
}
logrus.Infof("Pushing %s", newTagWithArch)
if err := i.Execute(
"gcloud", "docker", "--", "push", newTagWithArch,
); err != nil {
return fmt.Errorf("push container image: %w", err)
}
if err := i.SignImage(i.signer, newTagWithArch); err != nil {
return fmt.Errorf("sign container image: %w", err)
}
if err := i.Execute(
"docker", "rmi", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("remove local container image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
if err := os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled"); err != nil {
return fmt.Errorf("enable docker experimental CLI: %w", err)
}
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
logrus.Infof("Creating manifest image %s", imageVersion)
manifests := []string{}
for _, arch := range arches {
manifests = append(manifests,
fmt.Sprintf("%s-%s:%s", image, arch, version),
)
}
if err := i.Execute("docker", append(
[]string{"manifest", "create", "--amend", imageVersion},
manifests...,
)...); err != nil {
return fmt.Errorf("create manifest: %w", err)
}
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil {
return true, nil
} else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
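// Usage sketch with hypothetical values, not part of the original file: publishing a
// staged build. The registry and build path below are assumptions; Publish normalizes
// the version ("+" becomes "_") before tagging, pushing, and signing.
//
//	imgs := NewImages()
//	if err := imgs.Publish(
//		"gcr.io/k8s-staging-kubernetes",         // target registry (hypothetical)
//		"v1.27.0-alpha.2+abcdef",                // tagged as v1.27.0-alpha.2_abcdef
//		"/workspace/kubernetes/_output/release", // build path containing ImagesPath (hypothetical)
//	); err != nil {
//		logrus.Fatalf("publish images: %v", err)
//	}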
// Validate checks that image manifests have been pushed to a specified remote
// registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check than Validate because it does not presuppose
// the existence of a local build directory. It is used in CI builds to quickly
// determine whether a build is actually required.
func (i *Images) Exists(registry, version string, fast bool) (bool, error) |
// GetManifestImages can be used to retrieve the map of built images and
// architectures.
func (i *Images) GetManifestImages(
registry, version, buildPath string,
forTarballFn func(path, origTag, newTagWithArch string) error,
) (map[string][]string, error) {
manifestImages := make(map[string][]string)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof("Getting manifest images in %s", releaseImagesPath)
archPaths, err := os.ReadDir(releaseImagesPath)
if err != nil {
return nil, fmt.Errorf("read images path %s: %w", releaseImagesPath, err)
}
for _, archPath := range archPaths {
arch := archPath.Name()
if !archPath.IsDir() {
logrus.Infof("Skipping %s because it's not a directory", arch)
continue
}
if err := filepath.Walk(
filepath.Join(releaseImagesPath, arch),
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
fileName := info.Name()
if !strings.HasSuffix(fileName, ".tar") {
logrus.Infof("Skipping non-tarball %s", fileName)
return nil
}
origTag, err := i.RepoTagFromTarball(path)
if err != nil {
return fmt.Errorf("getting repo tags for tarball: %w", err)
}
tagMatches := tagRegex.FindStringSubmatch(origTag)
if len(tagMatches) != 2 {
return fmt.Errorf(
"malformed tag %s in %s", origTag, path,
)
}
binary := tagMatches[1]
newTag := filepath.Join(
registry,
strings.TrimSuffix(binary, "-"+arch),
)
newTagWithArch := fmt.Sprintf("%s-%s:%s", newTag, arch, version)
manifestImages[newTag] = append(manifestImages[newTag], arch)
if forTarballFn != nil {
if err := forTarballFn(
path, origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("executing tarball callback: %w", err)
}
}
return nil
},
); err != nil {
return nil, fmt.Errorf("traversing path: %w", err)
}
}
return manifestImages, nil
}
// normalizeVersion normalizes a container image version by replacing invalid characters ('+' becomes '_').
func (i *Images) normalizeVersion(version string) string {
return strings.ReplaceAll(version, "+", "_")
}
| {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry, image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return false, fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return false, fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return false, fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return false, fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return false, fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return false, fmt.Errorf("remove manifest file: %w", err)
}
}
return true, nil
} | identifier_body |
images.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package release
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/release-sdk/sign"
"sigs.k8s.io/release-utils/command"
)
// Images is a wrapper around container image related functionality.
type Images struct {
imageImpl
signer *sign.Signer
}
// NewImages creates a new Images instance
func NewImages() *Images {
return &Images{
imageImpl: &defaultImageImpl{},
signer: sign.New(sign.Default()),
}
}
// SetImpl can be used to set the internal image implementation.
func (i *Images) SetImpl(impl imageImpl) {
i.imageImpl = impl
}
// imageImpl is a client for working with container images.
//
//counterfeiter:generate . imageImpl
type imageImpl interface {
Execute(cmd string, args ...string) error
ExecuteOutput(cmd string, args ...string) (string, error)
RepoTagFromTarball(path string) (string, error)
SignImage(*sign.Signer, string) error
VerifyImage(*sign.Signer, string) error
}
type defaultImageImpl struct{}
func (*defaultImageImpl) Execute(cmd string, args ...string) error {
return command.New(cmd, args...).RunSilentSuccess()
}
func (*defaultImageImpl) ExecuteOutput(cmd string, args ...string) (string, error) {
res, err := command.New(cmd, args...).RunSilentSuccessOutput()
if err != nil {
return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return tagOutput.OutputTrimNL(), nil
}
func (*defaultImageImpl) SignImage(signer *sign.Signer, reference string) error {
_, err := signer.SignImage(reference)
return err
}
func (*defaultImageImpl) VerifyImage(_ *sign.Signer, _ string) error {
// TODO: bypassing this for now due to a failure in the promotion process
// that signs the images. We will release the Feb/2023 patch releases without full
// signatures, but we will sign them in the near future in a detached process.
// Revert this change when the patches are out.
// _, err := signer.VerifyImage(reference)
// return err
return nil
}
var tagRegex = regexp.MustCompile(`^.+/(.+):.+$`)
// Publish releases container images to the provided target registry.
func (i *Images) Publish(registry, version, buildPath string) error {
version = i.normalizeVersion(version)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof(
"Pushing container images from %s to registry %s",
releaseImagesPath, registry,
)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(path, origTag, newTagWithArch string) error {
if err := i.Execute(
"docker", "load", "-qi", path,
); err != nil {
return fmt.Errorf("load container image: %w", err)
}
if err := i.Execute(
"docker", "tag", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("tag container image: %w", err)
}
logrus.Infof("Pushing %s", newTagWithArch)
if err := i.Execute(
"gcloud", "docker", "--", "push", newTagWithArch,
); err != nil {
return fmt.Errorf("push container image: %w", err)
}
if err := i.SignImage(i.signer, newTagWithArch); err != nil {
return fmt.Errorf("sign container image: %w", err)
}
if err := i.Execute(
"docker", "rmi", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("remove local container image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
if err := os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled"); err != nil {
return fmt.Errorf("enable docker experimental CLI: %w", err)
}
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
logrus.Infof("Creating manifest image %s", imageVersion)
manifests := []string{}
for _, arch := range arches {
manifests = append(manifests,
fmt.Sprintf("%s-%s:%s", image, arch, version),
)
}
if err := i.Execute("docker", append(
[]string{"manifest", "create", "--amend", imageVersion},
manifests...,
)...); err != nil {
return fmt.Errorf("create manifest: %w", err)
}
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil | else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
// Validate checks that image manifests have been pushed to a specified remote
// registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
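// Illustrative sketch with hypothetical digests, not part of the original file: the jq
// filter used above selects the per-architecture digest from a manifest list such as
//
//	{"manifests": [
//	  {"digest": "sha256:aaa...", "platform": {"architecture": "amd64", "os": "linux"}},
//	  {"digest": "sha256:bbb...", "platform": {"architecture": "arm64", "os": "linux"}}
//	]}
//
// so for arch "arm64" the extracted digest is "sha256:bbb...". An empty result means the
// manifest list is missing that architecture and validation fails.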
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check than Validate because it does not presuppose
// the existence of a local build directory. It is used in CI builds to quickly
// determine whether a build is actually required.
func (i *Images) Exists(registry, version string, fast bool) (bool, error) {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry, image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return false, fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return false, fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return false, fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return false, fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return false, fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return false, fmt.Errorf("remove manifest file: %w", err)
}
}
return true, nil
}
// GetManifestImages can be used to retrieve the map of built images and
// architectures.
func (i *Images) GetManifestImages(
registry, version, buildPath string,
forTarballFn func(path, origTag, newTagWithArch string) error,
) (map[string][]string, error) {
manifestImages := make(map[string][]string)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof("Getting manifest images in %s", releaseImagesPath)
archPaths, err := os.ReadDir(releaseImagesPath)
if err != nil {
return nil, fmt.Errorf("read images path %s: %w", releaseImagesPath, err)
}
for _, archPath := range archPaths {
arch := archPath.Name()
if !archPath.IsDir() {
logrus.Infof("Skipping %s because it's not a directory", arch)
continue
}
if err := filepath.Walk(
filepath.Join(releaseImagesPath, arch),
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
fileName := info.Name()
if !strings.HasSuffix(fileName, ".tar") {
logrus.Infof("Skipping non-tarball %s", fileName)
return nil
}
origTag, err := i.RepoTagFromTarball(path)
if err != nil {
return fmt.Errorf("getting repo tags for tarball: %w", err)
}
tagMatches := tagRegex.FindStringSubmatch(origTag)
if len(tagMatches) != 2 {
return fmt.Errorf(
"malformed tag %s in %s", origTag, path,
)
}
binary := tagMatches[1]
newTag := filepath.Join(
registry,
strings.TrimSuffix(binary, "-"+arch),
)
newTagWithArch := fmt.Sprintf("%s-%s:%s", newTag, arch, version)
manifestImages[newTag] = append(manifestImages[newTag], arch)
if forTarballFn != nil {
if err := forTarballFn(
path, origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("executing tarball callback: %w", err)
}
}
return nil
},
); err != nil {
return nil, fmt.Errorf("traversing path: %w", err)
}
}
return manifestImages, nil
}
// normalizeVersion normalizes a container image version by replacing invalid characters ('+' becomes '_').
func (i *Images) normalizeVersion(version string) string {
return strings.ReplaceAll(version, "+", "_")
}
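// For example (illustrative, not part of the original file):
//
//	normalizeVersion("v1.28.0-alpha.1+e2ff59c") == "v1.28.0-alpha.1_e2ff59c"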
| {
return true, nil
} | conditional_block |
images.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package release
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/release-sdk/sign"
"sigs.k8s.io/release-utils/command"
)
// Images is a wrapper around container image related functionality.
type Images struct {
imageImpl
signer *sign.Signer
}
// NewImages creates a new Images instance
func NewImages() *Images {
return &Images{
imageImpl: &defaultImageImpl{},
signer: sign.New(sign.Default()),
}
}
// SetImpl can be used to set the internal image implementation.
func (i *Images) | (impl imageImpl) {
i.imageImpl = impl
}
// imageImpl is a client for working with container images.
//
//counterfeiter:generate . imageImpl
type imageImpl interface {
Execute(cmd string, args ...string) error
ExecuteOutput(cmd string, args ...string) (string, error)
RepoTagFromTarball(path string) (string, error)
SignImage(*sign.Signer, string) error
VerifyImage(*sign.Signer, string) error
}
type defaultImageImpl struct{}
func (*defaultImageImpl) Execute(cmd string, args ...string) error {
return command.New(cmd, args...).RunSilentSuccess()
}
func (*defaultImageImpl) ExecuteOutput(cmd string, args ...string) (string, error) {
res, err := command.New(cmd, args...).RunSilentSuccessOutput()
if err != nil {
return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return tagOutput.OutputTrimNL(), nil
}
func (*defaultImageImpl) SignImage(signer *sign.Signer, reference string) error {
_, err := signer.SignImage(reference)
return err
}
func (*defaultImageImpl) VerifyImage(_ *sign.Signer, _ string) error {
// TODO: bypassing this for now due to a failure in the promotion process
// that signs the images. We will release the Feb/2023 patch releases without full
// signatures, but we will sign them in the near future in a detached process.
// Revert this change when the patches are out.
// _, err := signer.VerifyImage(reference)
// return err
return nil
}
var tagRegex = regexp.MustCompile(`^.+/(.+):.+$`)
// Publish releases container images to the provided target registry.
func (i *Images) Publish(registry, version, buildPath string) error {
version = i.normalizeVersion(version)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof(
"Pushing container images from %s to registry %s",
releaseImagesPath, registry,
)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(path, origTag, newTagWithArch string) error {
if err := i.Execute(
"docker", "load", "-qi", path,
); err != nil {
return fmt.Errorf("load container image: %w", err)
}
if err := i.Execute(
"docker", "tag", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("tag container image: %w", err)
}
logrus.Infof("Pushing %s", newTagWithArch)
if err := i.Execute(
"gcloud", "docker", "--", "push", newTagWithArch,
); err != nil {
return fmt.Errorf("push container image: %w", err)
}
if err := i.SignImage(i.signer, newTagWithArch); err != nil {
return fmt.Errorf("sign container image: %w", err)
}
if err := i.Execute(
"docker", "rmi", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("remove local container image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
if err := os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled"); err != nil {
return fmt.Errorf("enable docker experimental CLI: %w", err)
}
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
logrus.Infof("Creating manifest image %s", imageVersion)
manifests := []string{}
for _, arch := range arches {
manifests = append(manifests,
fmt.Sprintf("%s-%s:%s", image, arch, version),
)
}
if err := i.Execute("docker", append(
[]string{"manifest", "create", "--amend", imageVersion},
manifests...,
)...); err != nil {
return fmt.Errorf("create manifest: %w", err)
}
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil {
return true, nil
} else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
// Validate checks that image manifests have been pushed to a specified remote
// registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check than Validate because it does not presuppose
// the existence of a local build directory. It is used in CI builds to quickly
// determine whether a build is actually required.
func (i *Images) Exists(registry, version string, fast bool) (bool, error) {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry, image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return false, fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return false, fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return false, fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return false, fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return false, fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return false, fmt.Errorf("remove manifest file: %w", err)
}
}
return true, nil
}
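// Usage sketch with a hypothetical registry, not part of the original file: a CI job
// can skip rebuilding when the fast-architecture manifests are already published.
//
//	imgs := NewImages()
//	exists, err := imgs.Exists("gcr.io/k8s-staging-kubernetes", "v1.28.0-alpha.1", true)
//	if err != nil {
//		logrus.Fatalf("checking for published images: %v", err)
//	}
//	if exists {
//		logrus.Info("image manifests already published, skipping build")
//	}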
// GetManifestImages can be used to retrieve the map of built images and
// architectures.
func (i *Images) GetManifestImages(
registry, version, buildPath string,
forTarballFn func(path, origTag, newTagWithArch string) error,
) (map[string][]string, error) {
manifestImages := make(map[string][]string)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof("Getting manifest images in %s", releaseImagesPath)
archPaths, err := os.ReadDir(releaseImagesPath)
if err != nil {
return nil, fmt.Errorf("read images path %s: %w", releaseImagesPath, err)
}
for _, archPath := range archPaths {
arch := archPath.Name()
if !archPath.IsDir() {
logrus.Infof("Skipping %s because it's not a directory", arch)
continue
}
if err := filepath.Walk(
filepath.Join(releaseImagesPath, arch),
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
fileName := info.Name()
if !strings.HasSuffix(fileName, ".tar") {
logrus.Infof("Skipping non-tarball %s", fileName)
return nil
}
origTag, err := i.RepoTagFromTarball(path)
if err != nil {
return fmt.Errorf("getting repo tags for tarball: %w", err)
}
tagMatches := tagRegex.FindStringSubmatch(origTag)
if len(tagMatches) != 2 {
return fmt.Errorf(
"malformed tag %s in %s", origTag, path,
)
}
binary := tagMatches[1]
newTag := filepath.Join(
registry,
strings.TrimSuffix(binary, "-"+arch),
)
newTagWithArch := fmt.Sprintf("%s-%s:%s", newTag, arch, version)
manifestImages[newTag] = append(manifestImages[newTag], arch)
if forTarballFn != nil {
if err := forTarballFn(
path, origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("executing tarball callback: %w", err)
}
}
return nil
},
); err != nil {
return nil, fmt.Errorf("traversing path: %w", err)
}
}
return manifestImages, nil
}
// normalizeVersion normalizes a container image version by replacing invalid characters ('+' becomes '_').
func (i *Images) normalizeVersion(version string) string {
return strings.ReplaceAll(version, "+", "_")
}
| SetImpl | identifier_name |
settings.go | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package settings
import (
"fmt"
"strconv"
"strings"
"go.uber.org/zap"
)
const (
// DefaultConfigFilename is the default config filename.
DefaultConfigFilename = "prototool.yaml"
// GenPluginTypeNone says there is no specific plugin type.
GenPluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) String() string {
if s, ok := _genPluginTypeToString[g]; ok |
return strconv.Itoa(int(g))
}
// The Is functions do not validate whether the plugin type is known,
// as that is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool {
return _genPluginTypeToIsGo[g]
}
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
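// exampleParseGenPluginType is an illustrative sketch, not part of the original file:
// parsing is case-insensitive and unknown values return an error.
func exampleParseGenPluginType() {
	t, err := ParseGenPluginType("GoGo")     // case-insensitive, yields GenPluginTypeGogo
	fmt.Println(t.String(), t.IsGogo(), err) // gogo true <nil>
	_, err = ParseGenPluginType("swift")     // not a known plugin type
	fmt.Println(err)                         // could not parse swift to a GenPluginType
}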
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs.
Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
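// Illustrative sketch, not part of the original file: an extra modifier maps a proto
// file path to its Go import path and is included with the plugin's Mfile=package
// modifiers mentioned above. The mapping below is a hypothetical example.
//
//	ExtraModifiers: map[string]string{
//		"google/api/annotations.proto": "google.golang.org/genproto/googleapis/api/annotations",
//	}
//
// which would contribute Mgoogle/api/annotations.proto=google.golang.org/genproto/googleapis/api/annotations.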
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty" yaml:"extra_modifiers,omitempty"`
} `json:"go_options,omitempty" yaml:"go_options,omitempty"`
Plugins []struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
Flags string `json:"flags,omitempty" yaml:"flags,omitempty"`
Output string `json:"output,omitempty" yaml:"output,omitempty"`
Path string `json:"path,omitempty" yaml:"path,omitempty"`
} `json:"plugins,omitempty" yaml:"plugins,omitempty"`
} `json:"generate,omitempty" yaml:"generate,omitempty"`
}
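// Illustrative sketch with hypothetical values, not part of the original file: a
// prototool.yaml that would populate ExternalConfig above, derived from its yaml tags.
//
//	protoc:
//	  version: 3.6.1
//	  includes:
//	    - ../vendor
//	lint:
//	  rules:
//	    remove:
//	      - ENUM_NAMES_CAMEL_CASE
//	generate:
//	  go_options:
//	    import_path: github.com/example/repo/idl
//	  plugins:
//	    - name: gogoslick
//	      type: gogo
//	      flags: plugins=grpc
//	      output: ../gen/go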
// ConfigProvider provides Configs.
type ConfigProvider interface {
// GetForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetForDir(dirPath string) (Config, error)
// Get tries to find a file named filePath with a config.
//
// The path must be an absolute path.
// The file must have either the extension .yaml or .json.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
Get(filePath string) (Config, error)
// GetFilePathForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is returned.
// If no such file is found, "" is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetFilePathForDir(dirPath string) (string, error)
// GetExcludePrefixesForDir tries to find a file named by one of the ConfigFilenames in the given
// directory and returns the cleaned absolute exclude prefixes. Unlike other functions
// on ConfigProvider, this has no recursive functionality - if there is no
// config file, nothing is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetExcludePrefixesForDir(dirPath string) ([]string, error)
}
// ConfigProviderOption is an option for a new ConfigProvider.
type ConfigProviderOption func(*configProvider)
// ConfigProviderWithLogger returns a ConfigProviderOption that uses the given logger.
//
// The default is to use zap.NewNop().
func ConfigProviderWithLogger(logger *zap.Logger) ConfigProviderOption {
return func(configProvider *configProvider) {
configProvider.logger = logger
}
}
// NewConfigProvider returns a new ConfigProvider.
func NewConfigProvider(options ...ConfigProviderOption) ConfigProvider {
return newConfigProvider(options...)
}
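// Usage sketch with a hypothetical path, not part of the original file: resolving the
// config that applies to a directory, with a custom logger attached.
//
//	provider := NewConfigProvider(ConfigProviderWithLogger(zap.NewExample()))
//	config, err := provider.GetForDir("/home/user/repo/protos") // must be an absolute path
//	if err != nil {
//		// handle error
//	}
//	_ = config.Compile.ProtobufVersion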
| {
return s
} | conditional_block |
settings.go | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package settings
import (
"fmt"
"strconv"
"strings"
"go.uber.org/zap"
)
const (
// DefaultConfigFilename is the default config filename.
DefaultConfigFilename = "prototool.yaml"
// GenPluginTypeNone says there is no specific plugin type.
GenPluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) String() string {
if s, ok := _genPluginTypeToString[g]; ok {
return s
}
return strconv.Itoa(int(g))
}
// The Is functions do not validate whether the plugin type is known,
// as that is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool {
return _genPluginTypeToIsGo[g]
}
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs. | Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty" yaml:"extra_modifiers,omitempty"`
} `json:"go_options,omitempty" yaml:"go_options,omitempty"`
Plugins []struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
Flags string `json:"flags,omitempty" yaml:"flags,omitempty"`
Output string `json:"output,omitempty" yaml:"output,omitempty"`
Path string `json:"path,omitempty" yaml:"path,omitempty"`
} `json:"plugins,omitempty" yaml:"plugins,omitempty"`
} `json:"generate,omitempty" yaml:"generate,omitempty"`
}
// ConfigProvider provides Configs.
type ConfigProvider interface {
// GetForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetForDir(dirPath string) (Config, error)
// Get tries to find a file named filePath with a config.
//
// The path must be an absolute path.
// The file must have either the extension .yaml or .json.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
Get(filePath string) (Config, error)
// GetFilePathForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is returned.
// If no such file is found, "" is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetFilePathForDir(dirPath string) (string, error)
// GetExcludePrefixesForDir tries to find a file named by one of the ConfigFilenames in the given
// directory and returns the cleaned absolute exclude prefixes. Unlike other functions
// on ConfigProvider, this has no recursive functionality - if there is no
// config file, nothing is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetExcludePrefixesForDir(dirPath string) ([]string, error)
}
// ConfigProviderOption is an option for a new ConfigProvider.
type ConfigProviderOption func(*configProvider)
// ConfigProviderWithLogger returns a ConfigProviderOption that uses the given logger.
//
// The default is to use zap.NewNop().
func ConfigProviderWithLogger(logger *zap.Logger) ConfigProviderOption {
return func(configProvider *configProvider) {
configProvider.logger = logger
}
}
// NewConfigProvider returns a new ConfigProvider.
func NewConfigProvider(options ...ConfigProviderOption) ConfigProvider {
return newConfigProvider(options...)
} | random_line_split |
|
settings.go | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package settings
import (
"fmt"
"strconv"
"strings"
"go.uber.org/zap"
)
const (
// DefaultConfigFilename is the default config filename.
DefaultConfigFilename = "prototool.yaml"
// GenPluginTypeNone says there is no specific plugin type.
GenPluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) String() string {
if s, ok := _genPluginTypeToString[g]; ok {
return s
}
return strconv.Itoa(int(g))
}
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool |
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
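// A small sketch of ParseGenPluginType's behavior (illustrative): input is
// lower-cased before lookup, and unknown names return an error.
//
//	t, err := ParseGenPluginType("GoGo") // t == GenPluginTypeGogo, err == nil
//	_, err = ParseGenPluginType("rust")  // err != nil: not a known plugin type
//	_ = t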
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs.
Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
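// Sketch with illustrative values: an extra modifier maps a .proto file to the
// Go import path of its generated package and is emitted as an Mfile=package
// modifier for the protoc plugin.
//
//	options := GenGoPluginOptions{
//		ImportPath: "github.com/example/project/proto",
//		ExtraModifiers: map[string]string{
//			"google/api/annotations.proto": "google.golang.org/genproto/googleapis/api/annotations",
//		},
//	}
//	_ = options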
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty" yaml:"extra_modifiers,omitempty"`
} `json:"go_options,omitempty" yaml:"go_options,omitempty"`
Plugins []struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
Flags string `json:"flags,omitempty" yaml:"flags,omitempty"`
Output string `json:"output,omitempty" yaml:"output,omitempty"`
Path string `json:"path,omitempty" yaml:"path,omitempty"`
} `json:"plugins,omitempty" yaml:"plugins,omitempty"`
} `json:"generate,omitempty" yaml:"generate,omitempty"`
}
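// A sketch of a prototool.yaml that decodes into ExternalConfig above; all
// concrete values (versions, paths, rule and plugin names) are illustrative.
//
//	excludes:
//	  - vendor
//	protoc:
//	  version: 3.6.1
//	  includes:
//	    - ../shared-protos
//	lint:
//	  rules:
//	    add:
//	      - SOME_EXTRA_LINT_RULE_ID
//	generate:
//	  go_options:
//	    import_path: github.com/example/project/proto
//	  plugins:
//	    - name: go
//	      type: go
//	      flags: plugins=grpc
//	      output: ../gen/go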
// ConfigProvider provides Configs.
type ConfigProvider interface {
// GetForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetForDir(dirPath string) (Config, error)
// Get tries to find a file named filePath with a config.
//
// The path must be an absolute path.
// The file must have either the extension .yaml or .json.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
Get(filePath string) (Config, error)
// GetFilePathForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is returned.
// If no such file is found, "" is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetFilePathForDir(dirPath string) (string, error)
// GetExcludePrefixesForDir tries to find a file named by one of the ConfigFilenames in the given
// directory and returns the cleaned absolute exclude prefixes. Unlike other functions
// on ConfigProvider, this has no recursive functionality - if there is no
// config file, nothing is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetExcludePrefixesForDir(dirPath string) ([]string, error)
}
// ConfigProviderOption is an option for a new ConfigProvider.
type ConfigProviderOption func(*configProvider)
// ConfigProviderWithLogger returns a ConfigProviderOption that uses the given logger.
//
// The default is to use zap.NewNop().
func ConfigProviderWithLogger(logger *zap.Logger) ConfigProviderOption {
return func(configProvider *configProvider) {
configProvider.logger = logger
}
}
// NewConfigProvider returns a new ConfigProvider.
func NewConfigProvider(options ...ConfigProviderOption) ConfigProvider {
return newConfigProvider(options...)
}
| {
return _genPluginTypeToIsGo[g]
} | identifier_body |
settings.go | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package settings
import (
"fmt"
"strconv"
"strings"
"go.uber.org/zap"
)
const (
// DefaultConfigFilename is the default config filename.
DefaultConfigFilename = "prototool.yaml"
// GenPluginTypeNone says there is no specific plugin type.
GenPluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) | () string {
if s, ok := _genPluginTypeToString[g]; ok {
return s
}
return strconv.Itoa(int(g))
}
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool {
return _genPluginTypeToIsGo[g]
}
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs.
Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty" yaml:"extra_modifiers,omitempty"`
} `json:"go_options,omitempty" yaml:"go_options,omitempty"`
Plugins []struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
Flags string `json:"flags,omitempty" yaml:"flags,omitempty"`
Output string `json:"output,omitempty" yaml:"output,omitempty"`
Path string `json:"path,omitempty" yaml:"path,omitempty"`
} `json:"plugins,omitempty" yaml:"plugins,omitempty"`
} `json:"generate,omitempty" yaml:"generate,omitempty"`
}
// ConfigProvider provides Configs.
type ConfigProvider interface {
// GetForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetForDir(dirPath string) (Config, error)
// Get tries to find a file named filePath with a config.
//
// The path must be an absolute path.
// The file must have either the extension .yaml or .json.
//
// If such a file is found, it is read as an ExternalConfig and converted to a Config.
// If no such file is found, Config{} is returned.
Get(filePath string) (Config, error)
// GetFilePathForDir tries to find a file named by one of the ConfigFilenames starting in the
// given directory, and going up a directory until hitting root.
//
// The directory must be an absolute path.
//
// If such a file is found, it is returned.
// If no such file is found, "" is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetFilePathForDir(dirPath string) (string, error)
// GetExcludePrefixesForDir tries to find a file named by one of the ConfigFilenames in the given
// directory and returns the cleaned absolute exclude prefixes. Unlike other functions
// on ConfigProvider, this has no recursive functionality - if there is no
// config file, nothing is returned.
// If multiple files named by one of the ConfigFilenames are found in the same
// directory, error is returned.
GetExcludePrefixesForDir(dirPath string) ([]string, error)
}
// ConfigProviderOption is an option for a new ConfigProvider.
type ConfigProviderOption func(*configProvider)
// ConfigProviderWithLogger returns a ConfigProviderOption that uses the given logger.
//
// The default is to use zap.NewNop().
func ConfigProviderWithLogger(logger *zap.Logger) ConfigProviderOption {
return func(configProvider *configProvider) {
configProvider.logger = logger
}
}
// NewConfigProvider returns a new ConfigProvider.
func NewConfigProvider(options ...ConfigProviderOption) ConfigProvider {
return newConfigProvider(options...)
}
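// Usage sketch (illustrative): the zap logger construction and the directory
// path below are assumptions made for the example.
//
//	logger, _ := zap.NewDevelopment()
//	provider := NewConfigProvider(ConfigProviderWithLogger(logger))
//	path, _ := provider.GetFilePathForDir("/abs/path/to/protos")
//	// path is "" when no prototool.yaml or prototool.json was found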
| String | identifier_name |
lib.rs | //! # L2
//!# What is L2?
//!
//!> L2 is named after the L2 or Euclidean distance, a popular distance function in deep learning
//!
//!L2 is a Pytorch-style Tensor+Autograd library written in the Rust programming language. It contains a multidimensional array class, `Tensor`, with support for strided arrays, numpy-style array slicing,
//!broadcasting, and most major math operations (including fast, BLAS-accelerated matrix multiplication!). On top of this, L2 has a built-in efficient graph-based autograd engine that keeps track of all
//!operations performed on a tensor and topologically sorts and traverses the graph to compute the gradients.
//!
//!I also made a simpler C++ version of l2 last year, which you can take a look at [here](https://github.com/bilal2vec/L2/tree/c%2B%2B)
//!
//!# Example
//!
//!```rust
//!use l2::tensor::*;
//!
//!fn main() -> Result<(), l2::errors::TensorError> {
//! let x: Tensor = Tensor::normal(&[2, 4], 0.0, 1.0)?;
//! let y: Tensor = Tensor::normal(&[4, 1], 0.0, 1.0)?;
//!
//! let z: Tensor = l2::matmul(&x, &y)?;
//!
//! z.backward();
//!
//! println!("{}", z);
//!
//! Ok(())
//!}
//!```
//!
//!# Design choices
//!
//!I made L2 to get better at using Rust and to learn more about how libraries like Pytorch and Tensorflow work behind the scenes, so don't expect this library to be production-ready :)
//!
//!L2 is surprisingly fast especially since I didn't try very hard to optimize all the operators, it's usually only less than one order of magnitude slower than Pytorch in most of the benchmarks that I ran. L2
//!only supports a cpu backend at the moment since I'm not familiar enough with rust to start working with CUDA and cudnn. So far, l2 doesn't have any Pytorch-style abstractions like the Parameter, Layer, or
//!Module classes. There might still be some bugs in the transpose operators and calling `.backward()` on tensors with more dimensions. I was interested in using Rust's [Const Generics](https://github.com/rust-lang/rfcs/blob/master/text/2000-const-generics.md) to run compile-time shape checks but I decided to leave it until some other time.
//!
//!# Contributing
//!
//!This repository is still a work in progress, so if you find a bug, think there is something missing, or have any suggestions for new features, feel free to open an issue or a pull request. Feel free to use
//!the library or code from it in your own projects, and if you feel that some code used in this project hasn't been properly accredited, please open an issue.
//!
//!# Authors
//!
//!- _Bilal Khan_
//!
//!# License
//!
//!This project is licensed under the MIT License - see the license file for details
//!
//!# Acknowledgements
//!
//!The fast.ai deep learning from the foundations course (https://course.fast.ai/part2) teaches a lot about how to make your own deep learning library
//!
//!Some of the resources that I found useful when working on this library include:
//!
//!- http://blog.ezyang.com/2019/05/pytorch-internals/
//!- https://pytorch.org/tutorials/beginner/nn_tutorial.html
//!- https://eisenjulian.github.io/deep-learning-in-100-lines/
//!- https://medium.com/@florian.caesar/how-to-create-a-machine-learning-framework-from-scratch-in-491-steps-93428369a4eb
//!- https://medium.com/@johan.mabille/how-we-wrote-xtensor-1-n-n-dimensional-containers-f79f9f4966a7
//!- https://erikpartridge.com/2019-03/rust-ml-simd-blas-lapack
//!- https://medium.com/@GolDDranks/things-rust-doesnt-let-you-do-draft-f596a3c740a5
//!- https://datascience.stackexchange.com/questions/20139/gradients-for-bias-terms-in-backpropagation
//!- https://cs231n.github.io/optimization-2/
//!- https://cs231n.github.io/neural-networks-case-study/#grad
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://github.com/karpathy/micrograd
//!- https://rufflewind.com/2016-12-30/reverse-mode-automatic-differentiation
//! - https://github.com/ibab/rust-ad
//! - https://github.com/Rufflewind/revad/blob/eb3978b3ccdfa8189f3ff59d1ecee71f51c33fd7/revad.py
//! - https://github.com/srirambandi/ai
//!- https://discuss.pytorch.org/t/is-pytorch-autograd-tape-based/13992/3
//!- https://www.reddit.com/r/MachineLearning/comments/8ep130/d_how_does_autograd_work/
//!- https://github.com/mattjj/autodidact
//!- https://github.com/karpathy/recurrentjs
//!- https://github.com/karpathy/randomfun
//!- https://medium.com/@ralphmao95/simple-autograd-implementation-understand-automatic-differentiation-hand-by-hand-9e86f6d703ab
//!- https://evcu.github.io/ml/autograd/
//!- https://blog.paperspace.com/pytorch-101-understanding-graphs-and-automatic-differentiation/
//!- https://github.com/maciejkula/wyrm
//!- https://medium.com/@maciejkula/building-an-autodifferentiation-library-9ccf32c7a658
//!- https://github.com/evcu/numpy_autograd/blob/master/my_autograd.py#L147
//!- https://github.com/evcu/numpy_autograd/blob/master/Autograd.ipynb
//!- https://cs231n.github.io/optimization-2/
//!- https://github.com/explosion/thinc
//!- https://github.com/joelgrus/joelnet
//!- https://github.com/QuantStack/xtensor
//!- https://github.com/ThinkingTransistor/Sigma
//!- https://github.com/mratsim/Arraymancer
//!- https://github.com/siekmanj/sieknet
//!- https://github.com/siekmanj/sieknet_2.0
//!- https://github.com/Daniel-Liu-c0deb0t/Java-Machine-Learning
//!- https://github.com/karpathy/micrograd
//!
//!This README is based on:
//!
//!- https://github.com/bilal2vec/pytorch_zoo
//!- https://github.com/bilal2vec/grover
//!- https://github.com/rish-16/gpt2client
//!- https://github.com/mxbi/mlcrate
//!- https://github.com/athityakumar/colorls
//!- https://github.com/amitmerchant1990/electron-markdownify
//!
//!I used carbon.now.sh with the "Shades of Purple" theme for the screenshot at the beginning of this README
//!
//!This project contains ~4300 lines of code
pub mod errors;
mod ops;
pub mod tensor;
use errors::TensorError;
use tensor::Tensor;
pub fn add<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs + rhs
}
pub fn sub<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs - rhs
}
pub fn mul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs * rhs
}
pub fn div<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs / rhs
}
pub fn pow<'a>(lhs: &'a Tensor, exp: f32) -> Result<Tensor<'a>, TensorError> {
lhs.pow(exp)
}
pub fn sqrt<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sqrt()
}
pub fn exp<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.exp()
}
pub fn log10<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log10()
}
pub fn log<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log()
}
pub fn abs<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.abs()
}
pub fn sin<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sin()
}
pub fn cos<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.cos()
}
pub fn tan<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.tan()
}
pub fn sum<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.sum(dim)
}
pub fn mean<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.mean(dim)
}
pub fn max<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.max(dim)
}
pub fn min<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.min(dim)
}
pub fn argmax<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmax(dim)
}
pub fn argmin<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmin(dim)
}
pub fn matmul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.matmul(rhs)
}
pub fn concat<'a>(lhs: &'a Tensor, rhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.concat(&rhs, dim)
}
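// A minimal sketch (kept as a comment rather than a doctest) of composing the
// functional wrappers above; shapes are illustrative and `?` assumes an
// enclosing function that returns a Result with TensorError.
//
//     let x = Tensor::normal(&[2, 3], 0.0, 1.0)?;
//     let y = Tensor::normal(&[3, 1], 0.0, 1.0)?;
//     let xy = matmul(&x, &y)?; // shape [2, 3] x [3, 1] -> [2, 1]
//     let s = sum(&xy, 0)?;     // reduce over the first dimension
//     s.backward();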
#[cfg(test)]
mod tests {
use super::tensor::*;
use super::*;
#[test]
fn test_add() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = add(&a, &b);
assert!((c.data == vec![4.0, 6.0]) && (c.shape == vec![2]))
}
#[test]
fn test_subtract() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sub(&a, &b);
assert!((c.data == vec![0.0, 0.0]) && (c.shape == vec![2]))
}
#[test]
fn test_mul() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mul(&a, &b);
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_div() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = div(&a, &b);
assert!((c.data == vec![1.0, 1.0]) && (c.shape == vec![2]))
}
#[test]
fn test_pow() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = pow(&a, 2.0).unwrap();
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn | () {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sum(&a, 0).unwrap();
assert!((c.data == vec![5.0]) && (c.shape == vec![1]))
}
#[test]
fn test_mean() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mean(&a, 0).unwrap();
assert!((c.data == vec![2.5]) && (c.shape == vec![1]))
}
#[test]
fn test_max() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = max(&a, 0).unwrap();
assert!((c.data == vec![3.0]) && (c.shape == vec![1]))
}
#[test]
fn test_min() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = min(&a, 0).unwrap();
assert!((c.data == vec![2.0]) && (c.shape == vec![1]))
}
#[test]
fn test_argmax() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = argmax(&a, 0).unwrap();
assert!((c.data == vec![1.0]) && (c.shape == vec![1]))
}
#[test]
fn test_argmin() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = argmin(&a, 0).unwrap();
assert!((c.data == vec![0.0]) && (c.shape == vec![1]))
}
#[test]
fn test_matmul() {
let x = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 4],
)
.unwrap();
let y = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 4, 2],
)
.unwrap();
let z = matmul(&x, &y).unwrap();
assert!(
(z.data == vec![50.0, 60.0, 114.0, 140.0, 514.0, 556.0, 706.0, 764.0])
&& (z.shape == vec![2, 2, 2])
)
}
#[test]
fn test_concat() {
let x = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 2, 2],
)
.unwrap();
let y = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 2, 2],
)
.unwrap();
let z = concat(&x, &y, -1).unwrap();
assert!(
(z.data
== vec![
1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 7.0, 8.0,
9.0, 10.0, 9.0, 10.0, 11.0, 12.0, 11.0, 12.0, 13.0, 14.0, 13.0, 14.0, 15.0,
16.0, 15.0, 16.0
])
&& (z.shape == vec![2, 2, 2, 4])
)
}
}
| test_sum | identifier_name |
lib.rs | //! # L2
//!# What is L2?
//!
//!> L2 is named after the L2 or Euclidean distance, a popular distance function in deep learning
//!
//!L2 is a Pytorch-style Tensor+Autograd library written in the Rust programming language. It contains a multidimensional array class, `Tensor`, with support for strided arrays, numpy-style array slicing,
//!broadcasting, and most major math operations (including fast, BLAS-accelerated matrix multiplication!). On top of this, L2 has a built-in efficient graph-based autograd engine that keeps track of all
//!operations performed on a tensor and topologically sorts and traverses the graph to compute the gradients.
//!
//!I also made a simpler C++ version of l2 last year, which you can take a look at [here](https://github.com/bilal2vec/L2/tree/c%2B%2B)
//!
//!# Example
//!
//!```rust
//!use l2::tensor::*;
//!
//!fn main() -> Result<(), l2::errors::TensorError> {
//! let x: Tensor = Tensor::normal(&[2, 4], 0.0, 1.0)?;
//! let y: Tensor = Tensor::normal(&[4, 1], 0.0, 1.0)?;
//!
//! let z: Tensor = l2::matmul(&x, &y)?;
//!
//! z.backward();
//!
//! println!("{}", z);
//!
//! Ok(())
//!}
//!```
//!
//!# Design choices
//!
//!I made L2 to get better at using Rust and to learn more about how libraries like Pytorch and Tensorflow work behind the scenes, so don't expect this library to be production-ready :)
//!
//!L2 is surprisingly fast especially since I didn't try very hard to optimize all the operators, it's usually only less than one order of magnitude slower than Pytorch in most of the benchmarks that I ran. L2
//!only supports a cpu backend at the moment since I'm not familiar enough with rust to start working with CUDA and cudnn. So far, l2 doesn't have any Pytorch-style abstractions like the Parameter, Layer, or
//!Module classes. There might still be some bugs in the transpose operators and calling `.backward()` on tensors with more dimensions. I was interested in using Rust's [Const Generics](https://github.com/rust-lang/rfcs/blob/master/text/2000-const-generics.md) to run compile-time shape checks but I decided to leave it until some other time.
//!
//!# Contributing
//!
//!This repository is still a work in progress, so if you find a bug, think there is something missing, or have any suggestions for new features, feel free to open an issue or a pull request. Feel free to use
//!the library or code from it in your own projects, and if you feel that some code used in this project hasn't been properly accredited, please open an issue.
//!
//!# Authors
//!
//!- _Bilal Khan_
//!
//!# License
//!
//!This project is licensed under the MIT License - see the license file for details
//!
//!# Acknowledgements
//!
//!The fast.ai deep learning from the foundations course (https://course.fast.ai/part2) teaches a lot about how to make your own deep learning library
//!
//!Some of the resources that I found useful when working on this library include:
//!
//!- http://blog.ezyang.com/2019/05/pytorch-internals/
//!- https://pytorch.org/tutorials/beginner/nn_tutorial.html
//!- https://eisenjulian.github.io/deep-learning-in-100-lines/
//!- https://medium.com/@florian.caesar/how-to-create-a-machine-learning-framework-from-scratch-in-491-steps-93428369a4eb
//!- https://medium.com/@johan.mabille/how-we-wrote-xtensor-1-n-n-dimensional-containers-f79f9f4966a7
//!- https://erikpartridge.com/2019-03/rust-ml-simd-blas-lapack
//!- https://medium.com/@GolDDranks/things-rust-doesnt-let-you-do-draft-f596a3c740a5
//!- https://datascience.stackexchange.com/questions/20139/gradients-for-bias-terms-in-backpropagation
//!- https://cs231n.github.io/optimization-2/
//!- https://cs231n.github.io/neural-networks-case-study/#grad
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://github.com/karpathy/micrograd
//!- https://rufflewind.com/2016-12-30/reverse-mode-automatic-differentiation
//! - https://github.com/ibab/rust-ad
//! - https://github.com/Rufflewind/revad/blob/eb3978b3ccdfa8189f3ff59d1ecee71f51c33fd7/revad.py
//! - https://github.com/srirambandi/ai
//!- https://discuss.pytorch.org/t/is-pytorch-autograd-tape-based/13992/3
//!- https://www.reddit.com/r/MachineLearning/comments/8ep130/d_how_does_autograd_work/
//!- https://github.com/mattjj/autodidact
//!- https://github.com/karpathy/recurrentjs
//!- https://github.com/karpathy/randomfun
//!- https://medium.com/@ralphmao95/simple-autograd-implementation-understand-automatic-differentiation-hand-by-hand-9e86f6d703ab
//!- https://evcu.github.io/ml/autograd/
//!- https://blog.paperspace.com/pytorch-101-understanding-graphs-and-automatic-differentiation/
//!- https://github.com/maciejkula/wyrm
//!- https://medium.com/@maciejkula/building-an-autodifferentiation-library-9ccf32c7a658
//!- https://github.com/evcu/numpy_autograd/blob/master/my_autograd.py#L147
//!- https://github.com/evcu/numpy_autograd/blob/master/Autograd.ipynb
//!- https://cs231n.github.io/optimization-2/
//!- https://github.com/explosion/thinc
//!- https://github.com/joelgrus/joelnet
//!- https://github.com/QuantStack/xtensor
//!- https://github.com/ThinkingTransistor/Sigma
//!- https://github.com/mratsim/Arraymancer
//!- https://github.com/siekmanj/sieknet
//!- https://github.com/siekmanj/sieknet_2.0
//!- https://github.com/Daniel-Liu-c0deb0t/Java-Machine-Learning
//!- https://github.com/karpathy/micrograd
//!
//!This README is based on:
//!
//!- https://github.com/bilal2vec/pytorch_zoo
//!- https://github.com/bilal2vec/grover
//!- https://github.com/rish-16/gpt2client
//!- https://github.com/mxbi/mlcrate
//!- https://github.com/athityakumar/colorls
//!- https://github.com/amitmerchant1990/electron-markdownify
//!
//!I used carbon.now.sh with the "Shades of Purple" theme for the screenshot at the beginning of this README
//!
//!This project contains ~4300 lines of code
pub mod errors;
mod ops;
pub mod tensor;
use errors::TensorError;
use tensor::Tensor;
pub fn add<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs + rhs
}
pub fn sub<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs - rhs
}
pub fn mul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs * rhs
}
pub fn div<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs / rhs
}
pub fn pow<'a>(lhs: &'a Tensor, exp: f32) -> Result<Tensor<'a>, TensorError> {
lhs.pow(exp)
}
pub fn sqrt<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sqrt()
}
pub fn exp<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.exp()
}
pub fn log10<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log10()
}
pub fn log<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log()
}
pub fn abs<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.abs()
}
pub fn sin<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sin()
}
pub fn cos<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.cos()
}
pub fn tan<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.tan()
}
pub fn sum<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.sum(dim)
}
pub fn mean<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.mean(dim)
}
pub fn max<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.max(dim)
}
pub fn min<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.min(dim)
}
pub fn argmax<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmax(dim)
}
pub fn argmin<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmin(dim) |
pub fn matmul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.matmul(rhs)
}
pub fn concat<'a>(lhs: &'a Tensor, rhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.concat(&rhs, dim)
}
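// Sketch (illustrative): the `dim` argument can be negative to count from the
// last axis; the concat test below passes -1 for the same reason.
//
//     let a = Tensor::new(vec![1.0, 2.0], &[1, 2])?;
//     let b = Tensor::new(vec![3.0, 4.0], &[1, 2])?;
//     let c = concat(&a, &b, -1)?; // joins along the last axis -> shape [1, 4]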
#[cfg(test)]
mod tests {
use super::tensor::*;
use super::*;
#[test]
fn test_add() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = add(&a, &b);
assert!((c.data == vec![4.0, 6.0]) && (c.shape == vec![2]))
}
#[test]
fn test_subtract() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sub(&a, &b);
assert!((c.data == vec![0.0, 0.0]) && (c.shape == vec![2]))
}
#[test]
fn test_mul() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mul(&a, &b);
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_div() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = div(&a, &b);
assert!((c.data == vec![1.0, 1.0]) && (c.shape == vec![2]))
}
#[test]
fn test_pow() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = pow(&a, 2.0).unwrap();
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_sum() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sum(&a, 0).unwrap();
assert!((c.data == vec![5.0]) && (c.shape == vec![1]))
}
#[test]
fn test_mean() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mean(&a, 0).unwrap();
assert!((c.data == vec![2.5]) && (c.shape == vec![1]))
}
#[test]
fn test_max() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = max(&a, 0).unwrap();
assert!((c.data == vec![3.0]) && (c.shape == vec![1]))
}
#[test]
fn test_min() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = min(&a, 0).unwrap();
assert!((c.data == vec![2.0]) && (c.shape == vec![1]))
}
#[test]
fn test_argmax() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = argmax(&a, 0).unwrap();
assert!((c.data == vec![1.0]) && (c.shape == vec![1]))
}
#[test]
fn test_argmin() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = argmin(&a, 0).unwrap();
assert!((c.data == vec![0.0]) && (c.shape == vec![1]))
}
#[test]
fn test_matmul() {
let x = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 4],
)
.unwrap();
let y = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 4, 2],
)
.unwrap();
let z = matmul(&x, &y).unwrap();
assert!(
(z.data == vec![50.0, 60.0, 114.0, 140.0, 514.0, 556.0, 706.0, 764.0])
&& (z.shape == vec![2, 2, 2])
)
}
#[test]
fn test_concat() {
let x = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 2, 2],
)
.unwrap();
let y = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 2, 2],
)
.unwrap();
let z = concat(&x, &y, -1).unwrap();
assert!(
(z.data
== vec![
1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 7.0, 8.0,
9.0, 10.0, 9.0, 10.0, 11.0, 12.0, 11.0, 12.0, 13.0, 14.0, 13.0, 14.0, 15.0,
16.0, 15.0, 16.0
])
&& (z.shape == vec![2, 2, 2, 4])
)
}
} | } | random_line_split |
lib.rs | //! # L2
//!# What is L2?
//!
//!> L2 is named after the L2 or Euclidean distance, a popular distance function in deep learning
//!
//!L2 is a Pytorch-style Tensor+Autograd library written in the Rust programming language. It contains a multidimensional array class, `Tensor`, with support for strided arrays, numpy-style array slicing,
//!broadcasting, and most major math operations (including fast, BLAS-accelerated matrix multiplication!). On top of this, L2 has a built-in efficient graph-based autograd engine that keeps track of all
//!operations performed on a tensor and topologically sorts and traverses the graph to compute the gradients.
//!
//!I also made a simpler C++ version of l2 last year, which you can take a look at [here](https://github.com/bilal2vec/L2/tree/c%2B%2B)
//!
//!# Example
//!
//!```rust
//!use l2::tensor::*;
//!
//!fn main() -> Result<(), l2::errors::TensorError> {
//! let x: Tensor = Tensor::normal(&[2, 4], 0.0, 1.0)?;
//! let y: Tensor = Tensor::normal(&[4, 1], 0.0, 1.0)?;
//!
//! let z: Tensor = l2::matmul(&x, &y)?;
//!
//! z.backward();
//!
//! println!("{}", z);
//!
//! Ok(())
//!}
//!```
//!
//!# Design choices
//!
//!I made L2 to get better at using Rust and to learn more about how libraries like Pytorch and Tensorflow work behind the scenes, so don't expect this library to be production-ready :)
//!
//!L2 is surprisingly fast especially since I didn't try very hard to optimize all the operators, it's usually only less than one order of magnitude slower than Pytorch in most of the benchmarks that I ran. L2
//!only supports a cpu backend at the moment since I'm not familiar enough with rust to start working with CUDA and cudnn. So far, l2 doesn't have any Pytorch-style abstractions like the Parameter, Layer, or
//!Module classes. There might still be some bugs in the transpose operators and calling `.backward()` on tensors with more dimensions. I was interested in using Rust's [Const Generics](https://github.com/rust-lang/rfcs/blob/master/text/2000-const-generics.md) to run compile-time shape checks but I decided to leave it until some other time.
//!
//!# Contributing
//!
//!This repository is still a work in progress, so if you find a bug, think there is something missing, or have any suggestions for new features, feel free to open an issue or a pull request. Feel free to use
//!the library or code from it in your own projects, and if you feel that some code used in this project hasn't been properly accredited, please open an issue.
//!
//!# Authors
//!
//!- _Bilal Khan_
//!
//!# License
//!
//!This project is licensed under the MIT License - see the license file for details
//!
//!# Acknowledgements
//!
//!The fast.ai deep learning from the foundations course (https://course.fast.ai/part2) teaches a lot about how to make your own deep learning library
//!
//!Some of the resources that I found useful when working on this library include:
//!
//!- http://blog.ezyang.com/2019/05/pytorch-internals/
//!- https://pytorch.org/tutorials/beginner/nn_tutorial.html
//!- https://eisenjulian.github.io/deep-learning-in-100-lines/
//!- https://medium.com/@florian.caesar/how-to-create-a-machine-learning-framework-from-scratch-in-491-steps-93428369a4eb
//!- https://medium.com/@johan.mabille/how-we-wrote-xtensor-1-n-n-dimensional-containers-f79f9f4966a7
//!- https://erikpartridge.com/2019-03/rust-ml-simd-blas-lapack
//!- https://medium.com/@GolDDranks/things-rust-doesnt-let-you-do-draft-f596a3c740a5
//!- https://datascience.stackexchange.com/questions/20139/gradients-for-bias-terms-in-backpropagation
//!- https://cs231n.github.io/optimization-2/
//!- https://cs231n.github.io/neural-networks-case-study/#grad
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://github.com/karpathy/micrograd
//!- https://rufflewind.com/2016-12-30/reverse-mode-automatic-differentiation
//! - https://github.com/ibab/rust-ad
//! - https://github.com/Rufflewind/revad/blob/eb3978b3ccdfa8189f3ff59d1ecee71f51c33fd7/revad.py
//! - https://github.com/srirambandi/ai
//!- https://discuss.pytorch.org/t/is-pytorch-autograd-tape-based/13992/3
//!- https://www.reddit.com/r/MachineLearning/comments/8ep130/d_how_does_autograd_work/
//!- https://github.com/mattjj/autodidact
//!- https://github.com/karpathy/recurrentjs
//!- https://github.com/karpathy/randomfun
//!- https://medium.com/@ralphmao95/simple-autograd-implementation-understand-automatic-differentiation-hand-by-hand-9e86f6d703ab
//!- https://evcu.github.io/ml/autograd/
//!- https://blog.paperspace.com/pytorch-101-understanding-graphs-and-automatic-differentiation/
//!- https://github.com/maciejkula/wyrm
//!- https://medium.com/@maciejkula/building-an-autodifferentiation-library-9ccf32c7a658
//!- https://github.com/evcu/numpy_autograd/blob/master/my_autograd.py#L147
//!- https://github.com/evcu/numpy_autograd/blob/master/Autograd.ipynb
//!- https://cs231n.github.io/optimization-2/
//!- https://github.com/explosion/thinc
//!- https://github.com/joelgrus/joelnet
//!- https://github.com/QuantStack/xtensor
//!- https://github.com/ThinkingTransistor/Sigma
//!- https://github.com/mratsim/Arraymancer
//!- https://github.com/siekmanj/sieknet
//!- https://github.com/siekmanj/sieknet_2.0
//!- https://github.com/Daniel-Liu-c0deb0t/Java-Machine-Learning
//!- https://github.com/karpathy/micrograd
//!
//!This README is based on:
//!
//!- https://github.com/bilal2vec/pytorch_zoo
//!- https://github.com/bilal2vec/grover
//!- https://github.com/rish-16/gpt2client
//!- https://github.com/mxbi/mlcrate
//!- https://github.com/athityakumar/colorls
//!- https://github.com/amitmerchant1990/electron-markdownify
//!
//!I used carbon.now.sh with the "Shades of Purple" theme for the screenshot at the beginning of this README
//!
//!This project contains ~4300 lines of code
pub mod errors;
mod ops;
pub mod tensor;
use errors::TensorError;
use tensor::Tensor;
pub fn add<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs + rhs
}
pub fn sub<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs - rhs
}
pub fn mul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs * rhs
}
pub fn div<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs / rhs
}
pub fn pow<'a>(lhs: &'a Tensor, exp: f32) -> Result<Tensor<'a>, TensorError> {
lhs.pow(exp)
}
pub fn sqrt<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sqrt()
}
pub fn exp<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.exp()
}
pub fn log10<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log10()
}
pub fn log<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log()
}
pub fn abs<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.abs()
}
pub fn sin<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sin()
}
pub fn cos<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.cos()
}
pub fn tan<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.tan()
}
pub fn sum<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.sum(dim)
}
pub fn mean<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.mean(dim)
}
pub fn max<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.max(dim)
}
pub fn min<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.min(dim)
}
pub fn argmax<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmax(dim)
}
pub fn argmin<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmin(dim)
}
pub fn matmul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.matmul(rhs)
}
pub fn concat<'a>(lhs: &'a Tensor, rhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.concat(&rhs, dim)
}
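// Sketch (illustrative): the arithmetic wrappers above simply forward to the
// operator overloads implemented on &Tensor, so both spellings below are
// equivalent.
//
//     let a = Tensor::new(vec![1.0, 2.0], &[2])?;
//     let b = Tensor::new(vec![3.0, 4.0], &[2])?;
//     let c = add(&a, &b);
//     let d = &a + &b;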
#[cfg(test)]
mod tests {
use super::tensor::*;
use super::*;
#[test]
fn test_add() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = add(&a, &b);
assert!((c.data == vec![4.0, 6.0]) && (c.shape == vec![2]))
}
#[test]
fn test_subtract() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sub(&a, &b);
assert!((c.data == vec![0.0, 0.0]) && (c.shape == vec![2]))
}
#[test]
fn test_mul() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mul(&a, &b);
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_div() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = div(&a, &b);
assert!((c.data == vec![1.0, 1.0]) && (c.shape == vec![2]))
}
#[test]
fn test_pow() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = pow(&a, 2.0).unwrap();
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_sum() |
#[test]
fn test_mean() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mean(&a, 0).unwrap();
assert!((c.data == vec![2.5]) && (c.shape == vec![1]))
}
#[test]
fn test_max() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = max(&a, 0).unwrap();
assert!((c.data == vec![3.0]) && (c.shape == vec![1]))
}
#[test]
fn test_min() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = min(&a, 0).unwrap();
assert!((c.data == vec![2.0]) && (c.shape == vec![1]))
}
#[test]
fn test_argmax() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = argmax(&a, 0).unwrap();
assert!((c.data == vec![1.0]) && (c.shape == vec![1]))
}
#[test]
fn test_argmin() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = argmin(&a, 0).unwrap();
assert!((c.data == vec![0.0]) && (c.shape == vec![1]))
}
#[test]
fn test_matmul() {
let x = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 4],
)
.unwrap();
let y = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 4, 2],
)
.unwrap();
let z = matmul(&x, &y).unwrap();
assert!(
(z.data == vec![50.0, 60.0, 114.0, 140.0, 514.0, 556.0, 706.0, 764.0])
&& (z.shape == vec![2, 2, 2])
)
}
#[test]
fn test_concat() {
let x = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 2, 2],
)
.unwrap();
let y = Tensor::new(
vec![
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0,
],
&[2, 2, 2, 2],
)
.unwrap();
let z = concat(&x, &y, -1).unwrap();
assert!(
(z.data
== vec![
1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 7.0, 8.0,
9.0, 10.0, 9.0, 10.0, 11.0, 12.0, 11.0, 12.0, 13.0, 14.0, 13.0, 14.0, 15.0,
16.0, 15.0, 16.0
])
&& (z.shape == vec![2, 2, 2, 4])
)
}
}
| {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sum(&a, 0).unwrap();
assert!((c.data == vec![5.0]) && (c.shape == vec![1]))
} | identifier_body |
lib.rs | use ndarray::{concatenate, s, Array1, Array2, Axis};
#[macro_use]
extern crate lazy_static;
peg::parser!(grammar parse_tile() for str {
pub rule parse_tile_id() -> usize
= "Tile " id:$(['0'..='9']+) ":" { id.parse().unwrap() }
pub rule parse_border() -> (u32, u32)
= line:$(['#' | '.']+) {
let line = line.chars().map(|x| match x {
'#' => '1',
'.' => '0',
_ => unimplemented!("invalid image pixel"),
}).collect::<String>();
(u32::from_str_radix(&line, 2).unwrap(),
u32::from_str_radix(&line.chars().rev().collect::<String>(), 2).unwrap())
}
pub rule parse_sub_image() -> Array1<u8>
= line:$(['#' | '.']+) {
let mut arr = unsafe { Array1::<u8>::uninitialized(line.len()) };
for (i, c) in line.chars().enumerate() {
match c {
'#' => arr[[i]] = 1,
'.' => arr[[i]] = 0,
_ => unimplemented!("unsupport character {}", c),
}
}
arr
}
});
pub trait ImageTransformer<T> {
fn original(&self) -> Array2<T>;
fn rot90_clockwise(&self) -> Array2<T>;
fn rot180_clockwise(&self) -> Array2<T>;
fn rot270_clockwise(&self) -> Array2<T>;
fn flip_vertical(&self) -> Array2<T>;
fn flip_horizontal(&self) -> Array2<T>;
fn flip_main_diagonal(&self) -> Array2<T>;
fn flip_sub_diagonal(&self) -> Array2<T>;
}
impl<T> ImageTransformer<T> for Array2<T>
where
T: Copy,
{
fn original(&self) -> Array2<T> {
self.clone()
}
fn rot90_clockwise(&self) -> Array2<T> {
let mut arr = self.clone();
arr.swap_axes(0, 1);
arr.flip_horizontal()
}
fn rot180_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..;-1]));
arr
}
fn rot270_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr.swap_axes(0, 1);
arr
}
fn flip_vertical(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..]));
arr
}
fn flip_horizontal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr
}
fn flip_main_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.t());
arr
}
fn flip_sub_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr: Array2<T> = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.rot270_clockwise().t());
arr.rot90_clockwise()
}
}
#[allow(unused)]
#[derive(Eq)]
pub struct Tile {
tile_id: usize,
sub_image: Array2<u8>,
borders: Vec<(u32, u32, u32, u32)>,
}
impl PartialEq for Tile {
fn eq(&self, other: &Self) -> bool {
self.tile_id == other.tile_id
}
}
use std::fmt::Debug;
impl Debug for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{}]", self.tile_id)?;
Ok(())
}
}
impl Tile {
pub fn new(data: &str) -> Self {
let lines = data
.split('\n')
.map(|s| s.trim_end().to_string())
.collect::<Vec<_>>();
let shape = lines[1].len() - 2;
let tile_id = parse_tile::parse_tile_id(&lines[0]).unwrap();
let (top, top_rev) = parse_tile::parse_border(&lines[1]).unwrap();
let left_col = lines
.iter()
.skip(1)
.map(|s| s.chars().next().unwrap())
.collect::<String>();
let (left, left_rev) = parse_tile::parse_border(&left_col).unwrap();
let right_col = lines
.iter()
.skip(1)
.map(|s| s.chars().last().unwrap())
.collect::<String>();
let (right, right_rev) = parse_tile::parse_border(&right_col).unwrap();
let (bottom, bottom_rev) = parse_tile::parse_border(&lines[lines.len() - 1]).unwrap();
let mut sub_image = unsafe { Array2::<u8>::uninitialized((shape, shape)) };
for (i, row) in lines.iter().enumerate().skip(2).take(shape) {
let row_pixels = parse_tile::parse_sub_image(&row[1..row.len() - 1]).unwrap();
sub_image.row_mut(i - 2).assign(&row_pixels);
}
Self {
tile_id,
sub_image,
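            // Edge bitmasks for the 8 orientations listed below, stored as (top, right, bottom, left);
            // the *_rev values are the same edges read in the opposite direction.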
borders: vec![
(top, right, bottom, left), // original sub image
(left_rev, top, right_rev, bottom), // rotate 90 degree clockwise
(bottom_rev, left_rev, top_rev, right_rev), // rotate 180 degree clockwise
(right, bottom_rev, left, top_rev), // rotate 270 degree clockwise
(bottom, right_rev, top, left_rev), // flip vertical
(top_rev, left, bottom_rev, right), // flip horizontal
(left, bottom, right, top), // flip along main diagonal
(right_rev, top_rev, left_rev, bottom_rev), // flip along sub diagonal
],
}
}
pub fn get_sub_image(&self, idx: usize) -> Array2<u8> {
match idx {
0 => self.sub_image.original(),
1 => self.sub_image.rot90_clockwise(),
2 => self.sub_image.rot180_clockwise(),
3 => self.sub_image.rot270_clockwise(),
4 => self.sub_image.flip_vertical(),
5 => self.sub_image.flip_horizontal(),
6 => self.sub_image.flip_main_diagonal(),
7 => self.sub_image.flip_sub_diagonal(),
_ => unreachable!("not a valid form index: {}", idx),
}
}
}
pub struct BigImage {
tiles: Vec<Tile>,
shape: usize,
}
impl BigImage {
pub fn new(tiles: Vec<Tile>) -> Self {
let shape = (tiles.len() as f64).sqrt() as usize;
Self { shape, tiles }
}
pub fn | <'a>(
&'a self,
row: usize,
col: usize,
prev_images: &[(&'a Tile, usize)],
) -> Vec<(&'a Tile, usize)> {
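        // Depth-first backtracking: try each unused tile in all 8 orientations at (row, col),
        // requiring its top/left edges to match the tiles already placed above and to the left,
        // then recurse to the next cell; an empty Vec signals a dead end.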
let mut result: Vec<(&Tile, usize)> = vec![];
result.extend_from_slice(prev_images);
for tile in self.tiles.iter() {
if result.iter().any(|(t, _)| t == &tile) {
continue;
}
result.push((tile, 0));
let upper_tile = if row > 0 {
Some(result[(row - 1) * self.shape + col])
} else {
None
};
let left_tile = if col > 0 {
Some(result[row * self.shape + col - 1])
} else {
None
};
for idx in 0..8 {
result.last_mut().unwrap().1 = idx;
if (row == 0
|| tile.borders[idx].0
== upper_tile.unwrap().0.borders[upper_tile.unwrap().1].2)
&& (col == 0
|| tile.borders[idx].3
== left_tile.unwrap().0.borders[left_tile.unwrap().1].1)
{
if row == self.shape - 1 && col == self.shape - 1 {
return result;
}
let (new_row, new_col) = if col + 1 >= self.shape {
(row + 1, 0)
} else {
(row, col + 1)
};
let ret = self.fits(new_row, new_col, &result);
if !ret.is_empty() {
return ret;
}
}
}
result.pop();
}
vec![]
}
pub fn splice_result(&self, fit_result: &[(&Tile, usize)]) -> Array2<u8> {
let pixels = fit_result[0].0.sub_image.shape()[0];
let mut big_image = Array2::<u8>::zeros((0, self.shape * pixels));
for row in 0..self.shape {
let mut row_image = Array2::<u8>::zeros((pixels, 0));
for col in 0..self.shape {
let result = fit_result[row * self.shape + col];
row_image = concatenate![Axis(1), row_image, result.0.get_sub_image(result.1)];
}
big_image = concatenate![Axis(0), big_image, row_image];
}
big_image
}
}
pub fn part1_solution(fit_result: &[(&Tile, usize)]) -> usize {
let shape = (fit_result.len() as f64).sqrt() as usize;
let corner_idx = &[0, shape - 1, shape * (shape - 1), shape * shape - 1];
fit_result
.iter()
.enumerate()
.filter(|(idx, _)| corner_idx.contains(idx))
.map(|(_, (t, _))| t.tile_id)
.product()
}
lazy_static! {
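    // Sea-monster mask: a 3x20 grid of 0/1 pixels matched against the assembled image.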
static ref MONSTER: Array2<u8> = unsafe {
Array2::from_shape_vec_unchecked(
(3, 20),
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0,
1, 0, 0, 0,
],
)
};
}
fn find_all_monsters(image: &Array2<u8>) -> Vec<(usize, usize)> {
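    // Slide a MONSTER-sized window across the image; a monster sits at (row, col)
    // whenever (window & MONSTER) == MONSTER.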
let shape = image.shape()[0];
let mut found = vec![];
for row in 0..=shape - MONSTER.shape()[0] {
for col in 0..=shape - MONSTER.shape()[1] {
if &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) & &MONSTER.slice(s![.., ..])
== MONSTER.slice(s![.., ..])
{
found.push((row, col));
}
}
}
found
}
pub fn part2_solution(big_image: &BigImage, fit_result: &[(&Tile, usize)]) -> usize {
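    // Erase every matched monster by subtracting the mask, then count the pixels that
    // remain set (the water roughness).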
let mut image = big_image.splice_result(fit_result);
let monsters_pos = find_all_monsters(&image);
for (row, col) in monsters_pos {
let region = &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) - &MONSTER.slice(s![.., ..]);
image
.slice_mut(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
])
.assign(&(region));
}
image.iter().map(|x| *x as usize).sum::<usize>()
}
pub fn read_input(input_file: &str) -> Vec<Tile> {
std::fs::read_to_string(input_file)
.unwrap()
.split("\n\n")
.filter(|&b| !b.trim().is_empty())
.map(|b| Tile::new(b))
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::Array;
#[test]
fn test_matrix_transforms() {
let m = Array::range(1., 5., 1.).into_shape((2, 2)).unwrap();
assert_eq!(m.original(), ndarray::arr2(&[[1., 2.], [3., 4.]]));
assert_eq!(m.rot90_clockwise(), ndarray::arr2(&[[3., 1.], [4., 2.]]));
assert_eq!(m.rot180_clockwise(), ndarray::arr2(&[[4., 3.], [2., 1.]]));
assert_eq!(m.rot270_clockwise(), ndarray::arr2(&[[2., 4.], [1., 3.]]));
assert_eq!(m.flip_vertical(), ndarray::arr2(&[[3., 4.], [1., 2.]]));
assert_eq!(m.flip_horizontal(), ndarray::arr2(&[[2., 1.], [4., 3.]]));
assert_eq!(m.flip_main_diagonal(), ndarray::arr2(&[[1., 3.], [2., 4.]]));
assert_eq!(m.flip_sub_diagonal(), ndarray::arr2(&[[4., 2.], [3., 1.]]));
}
#[test]
fn test_part1() {
let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part1_solution(&result), 20899048083289);
}
#[test]
fn test_part2() {
let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part2_solution(&test_image, &result), 273);
}
}
| fits | identifier_name |
lib.rs | use ndarray::{concatenate, s, Array1, Array2, Axis};
#[macro_use]
extern crate lazy_static;
peg::parser!(grammar parse_tile() for str {
pub rule parse_tile_id() -> usize
= "Tile " id:$(['0'..='9']+) ":" { id.parse().unwrap() }
pub rule parse_border() -> (u32, u32)
= line:$(['#' | '.']+) {
let line = line.chars().map(|x| match x {
'#' => '1',
'.' => '0',
_ => unimplemented!("invalid image pixel"),
}).collect::<String>();
(u32::from_str_radix(&line, 2).unwrap(),
u32::from_str_radix(&line.chars().rev().collect::<String>(), 2).unwrap())
}
pub rule parse_sub_image() -> Array1<u8>
= line:$(['#' | '.']+) {
let mut arr = unsafe { Array1::<u8>::uninitialized(line.len()) };
for (i, c) in line.chars().enumerate() {
match c {
'#' => arr[[i]] = 1,
'.' => arr[[i]] = 0,
_ => unimplemented!("unsupport character {}", c),
}
}
arr
}
});
pub trait ImageTransformer<T> {
fn original(&self) -> Array2<T>;
fn rot90_clockwise(&self) -> Array2<T>;
fn rot180_clockwise(&self) -> Array2<T>;
fn rot270_clockwise(&self) -> Array2<T>;
fn flip_vertical(&self) -> Array2<T>;
fn flip_horizontal(&self) -> Array2<T>;
fn flip_main_diagonal(&self) -> Array2<T>;
fn flip_sub_diagonal(&self) -> Array2<T>;
}
impl<T> ImageTransformer<T> for Array2<T>
where
T: Copy,
{
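    // All transforms assume a square array (shape[0] == shape[1]) and are built from
    // reversed slices, transposes, and axis swaps.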
fn original(&self) -> Array2<T> {
self.clone()
}
fn rot90_clockwise(&self) -> Array2<T> {
let mut arr = self.clone();
arr.swap_axes(0, 1);
arr.flip_horizontal()
}
fn rot180_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..;-1]));
arr
}
fn rot270_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr.swap_axes(0, 1);
arr
}
fn flip_vertical(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..]));
arr
}
fn flip_horizontal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr
}
fn flip_main_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.t());
arr
}
fn flip_sub_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr: Array2<T> = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.rot270_clockwise().t());
arr.rot90_clockwise()
}
}
#[allow(unused)]
#[derive(Eq)]
pub struct Tile {
tile_id: usize,
sub_image: Array2<u8>,
borders: Vec<(u32, u32, u32, u32)>,
}
impl PartialEq for Tile {
fn eq(&self, other: &Self) -> bool {
self.tile_id == other.tile_id
}
}
use std::fmt::Debug;
impl Debug for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{}]", self.tile_id)?;
Ok(())
}
}
impl Tile {
pub fn new(data: &str) -> Self {
let lines = data
.split('\n')
.map(|s| s.trim_end().to_string())
.collect::<Vec<_>>();
let shape = lines[1].len() - 2;
let tile_id = parse_tile::parse_tile_id(&lines[0]).unwrap();
let (top, top_rev) = parse_tile::parse_border(&lines[1]).unwrap();
let left_col = lines
.iter()
.skip(1)
.map(|s| s.chars().next().unwrap())
.collect::<String>();
let (left, left_rev) = parse_tile::parse_border(&left_col).unwrap();
let right_col = lines
.iter()
.skip(1)
.map(|s| s.chars().last().unwrap())
.collect::<String>();
let (right, right_rev) = parse_tile::parse_border(&right_col).unwrap();
let (bottom, bottom_rev) = parse_tile::parse_border(&lines[lines.len() - 1]).unwrap();
let mut sub_image = unsafe { Array2::<u8>::uninitialized((shape, shape)) };
for (i, row) in lines.iter().enumerate().skip(2).take(shape) {
let row_pixels = parse_tile::parse_sub_image(&row[1..row.len() - 1]).unwrap();
sub_image.row_mut(i - 2).assign(&row_pixels);
}
Self {
tile_id,
sub_image,
borders: vec![
(top, right, bottom, left), // original sub image
(left_rev, top, right_rev, bottom), // rotate 90 degree clockwise
(bottom_rev, left_rev, top_rev, right_rev), // rotate 180 degree clockwise
(right, bottom_rev, left, top_rev), // rotate 270 degree clockwise
(bottom, right_rev, top, left_rev), // flip vertical
(top_rev, left, bottom_rev, right), // flip horizontal
(left, bottom, right, top), // flip along main diagonal
(right_rev, top_rev, left_rev, bottom_rev), // flip along sub diagonal
],
}
}
pub fn get_sub_image(&self, idx: usize) -> Array2<u8> {
match idx {
0 => self.sub_image.original(),
1 => self.sub_image.rot90_clockwise(),
2 => self.sub_image.rot180_clockwise(),
3 => self.sub_image.rot270_clockwise(),
4 => self.sub_image.flip_vertical(),
5 => self.sub_image.flip_horizontal(),
6 => self.sub_image.flip_main_diagonal(),
7 => self.sub_image.flip_sub_diagonal(),
_ => unreachable!("not a valid form index: {}", idx),
}
}
}
pub struct BigImage {
tiles: Vec<Tile>,
shape: usize,
}
impl BigImage {
pub fn new(tiles: Vec<Tile>) -> Self {
let shape = (tiles.len() as f64).sqrt() as usize;
Self { shape, tiles }
}
pub fn fits<'a>(
&'a self,
row: usize,
col: usize,
prev_images: &[(&'a Tile, usize)],
) -> Vec<(&'a Tile, usize)> {
let mut result: Vec<(&Tile, usize)> = vec![];
result.extend_from_slice(prev_images);
for tile in self.tiles.iter() {
if result.iter().any(|(t, _)| t == &tile) {
continue;
}
result.push((tile, 0));
let upper_tile = if row > 0 {
Some(result[(row - 1) * self.shape + col])
} else {
None
};
let left_tile = if col > 0 {
Some(result[row * self.shape + col - 1])
} else {
None
};
for idx in 0..8 {
result.last_mut().unwrap().1 = idx;
if (row == 0
|| tile.borders[idx].0
== upper_tile.unwrap().0.borders[upper_tile.unwrap().1].2)
&& (col == 0
|| tile.borders[idx].3
== left_tile.unwrap().0.borders[left_tile.unwrap().1].1)
{
if row == self.shape - 1 && col == self.shape - 1 {
return result;
}
let (new_row, new_col) = if col + 1 >= self.shape {
(row + 1, 0)
} else {
(row, col + 1)
};
let ret = self.fits(new_row, new_col, &result);
if !ret.is_empty() {
return ret;
}
}
}
result.pop();
}
vec![]
}
pub fn splice_result(&self, fit_result: &[(&Tile, usize)]) -> Array2<u8> {
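        // Stitch the solved grid into one image: concatenate each tile's oriented sub-image
        // across a row, then stack the rows.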
let pixels = fit_result[0].0.sub_image.shape()[0];
let mut big_image = Array2::<u8>::zeros((0, self.shape * pixels));
for row in 0..self.shape {
let mut row_image = Array2::<u8>::zeros((pixels, 0));
for col in 0..self.shape {
let result = fit_result[row * self.shape + col];
row_image = concatenate![Axis(1), row_image, result.0.get_sub_image(result.1)];
}
big_image = concatenate![Axis(0), big_image, row_image];
}
big_image
}
}
pub fn part1_solution(fit_result: &[(&Tile, usize)]) -> usize {
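    // Part 1 answer: the product of the IDs of the four corner tiles of the assembled square.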
let shape = (fit_result.len() as f64).sqrt() as usize;
let corner_idx = &[0, shape - 1, shape * (shape - 1), shape * shape - 1];
fit_result
.iter()
.enumerate()
.filter(|(idx, _)| corner_idx.contains(idx))
.map(|(_, (t, _))| t.tile_id)
.product()
}
lazy_static! {
static ref MONSTER: Array2<u8> = unsafe {
Array2::from_shape_vec_unchecked(
(3, 20),
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0,
1, 0, 0, 0,
],
)
};
}
fn find_all_monsters(image: &Array2<u8>) -> Vec<(usize, usize)> {
let shape = image.shape()[0];
let mut found = vec![];
for row in 0..=shape - MONSTER.shape()[0] {
for col in 0..=shape - MONSTER.shape()[1] {
if &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) & &MONSTER.slice(s![.., ..])
== MONSTER.slice(s![.., ..])
{
found.push((row, col));
}
}
}
found
}
pub fn part2_solution(big_image: &BigImage, fit_result: &[(&Tile, usize)]) -> usize {
let mut image = big_image.splice_result(fit_result);
let monsters_pos = find_all_monsters(&image);
for (row, col) in monsters_pos {
let region = &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) - &MONSTER.slice(s![.., ..]);
image
.slice_mut(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
])
.assign(&(region));
}
image.iter().map(|x| *x as usize).sum::<usize>()
}
pub fn read_input(input_file: &str) -> Vec<Tile> {
std::fs::read_to_string(input_file)
.unwrap()
.split("\n\n")
.filter(|&b| !b.trim().is_empty())
.map(|b| Tile::new(b))
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::Array;
#[test]
fn test_matrix_transforms() {
let m = Array::range(1., 5., 1.).into_shape((2, 2)).unwrap();
assert_eq!(m.original(), ndarray::arr2(&[[1., 2.], [3., 4.]]));
assert_eq!(m.rot90_clockwise(), ndarray::arr2(&[[3., 1.], [4., 2.]]));
assert_eq!(m.rot180_clockwise(), ndarray::arr2(&[[4., 3.], [2., 1.]]));
assert_eq!(m.rot270_clockwise(), ndarray::arr2(&[[2., 4.], [1., 3.]]));
assert_eq!(m.flip_vertical(), ndarray::arr2(&[[3., 4.], [1., 2.]]));
assert_eq!(m.flip_horizontal(), ndarray::arr2(&[[2., 1.], [4., 3.]]));
assert_eq!(m.flip_main_diagonal(), ndarray::arr2(&[[1., 3.], [2., 4.]]));
assert_eq!(m.flip_sub_diagonal(), ndarray::arr2(&[[4., 2.], [3., 1.]]));
}
#[test]
fn test_part1() {
let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part1_solution(&result), 20899048083289);
}
#[test]
fn test_part2() { | }
} | let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part2_solution(&test_image, &result), 273); | random_line_split |
lib.rs | use ndarray::{concatenate, s, Array1, Array2, Axis};
#[macro_use]
extern crate lazy_static;
peg::parser!(grammar parse_tile() for str {
pub rule parse_tile_id() -> usize
= "Tile " id:$(['0'..='9']+) ":" { id.parse().unwrap() }
pub rule parse_border() -> (u32, u32)
= line:$(['#' | '.']+) {
let line = line.chars().map(|x| match x {
'#' => '1',
'.' => '0',
_ => unimplemented!("invalid image pixel"),
}).collect::<String>();
(u32::from_str_radix(&line, 2).unwrap(),
u32::from_str_radix(&line.chars().rev().collect::<String>(), 2).unwrap())
}
pub rule parse_sub_image() -> Array1<u8>
= line:$(['#' | '.']+) {
let mut arr = unsafe { Array1::<u8>::uninitialized(line.len()) };
for (i, c) in line.chars().enumerate() {
match c {
'#' => arr[[i]] = 1,
'.' => arr[[i]] = 0,
_ => unimplemented!("unsupport character {}", c),
}
}
arr
}
});
pub trait ImageTransformer<T> {
fn original(&self) -> Array2<T>;
fn rot90_clockwise(&self) -> Array2<T>;
fn rot180_clockwise(&self) -> Array2<T>;
fn rot270_clockwise(&self) -> Array2<T>;
fn flip_vertical(&self) -> Array2<T>;
fn flip_horizontal(&self) -> Array2<T>;
fn flip_main_diagonal(&self) -> Array2<T>;
fn flip_sub_diagonal(&self) -> Array2<T>;
}
impl<T> ImageTransformer<T> for Array2<T>
where
T: Copy,
{
fn original(&self) -> Array2<T> {
self.clone()
}
fn rot90_clockwise(&self) -> Array2<T> {
let mut arr = self.clone();
arr.swap_axes(0, 1);
arr.flip_horizontal()
}
fn rot180_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..;-1]));
arr
}
fn rot270_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr.swap_axes(0, 1);
arr
}
fn flip_vertical(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..]));
arr
}
fn flip_horizontal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr
}
fn flip_main_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.t());
arr
}
fn flip_sub_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr: Array2<T> = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.rot270_clockwise().t());
arr.rot90_clockwise()
}
}
#[allow(unused)]
#[derive(Eq)]
pub struct Tile {
tile_id: usize,
sub_image: Array2<u8>,
borders: Vec<(u32, u32, u32, u32)>,
}
impl PartialEq for Tile {
fn eq(&self, other: &Self) -> bool {
self.tile_id == other.tile_id
}
}
use std::fmt::Debug;
impl Debug for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{}]", self.tile_id)?;
Ok(())
}
}
impl Tile {
pub fn new(data: &str) -> Self |
pub fn get_sub_image(&self, idx: usize) -> Array2<u8> {
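        // Orientation indices 0..=7 follow the same order as the `borders` vec:
        // original, the three clockwise rotations, then the four flips.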
match idx {
0 => self.sub_image.original(),
1 => self.sub_image.rot90_clockwise(),
2 => self.sub_image.rot180_clockwise(),
3 => self.sub_image.rot270_clockwise(),
4 => self.sub_image.flip_vertical(),
5 => self.sub_image.flip_horizontal(),
6 => self.sub_image.flip_main_diagonal(),
7 => self.sub_image.flip_sub_diagonal(),
_ => unreachable!("not a valid form index: {}", idx),
}
}
}
pub struct BigImage {
tiles: Vec<Tile>,
shape: usize,
}
impl BigImage {
pub fn new(tiles: Vec<Tile>) -> Self {
let shape = (tiles.len() as f64).sqrt() as usize;
Self { shape, tiles }
}
pub fn fits<'a>(
&'a self,
row: usize,
col: usize,
prev_images: &[(&'a Tile, usize)],
) -> Vec<(&'a Tile, usize)> {
let mut result: Vec<(&Tile, usize)> = vec![];
result.extend_from_slice(prev_images);
for tile in self.tiles.iter() {
if result.iter().any(|(t, _)| t == &tile) {
continue;
}
result.push((tile, 0));
let upper_tile = if row > 0 {
Some(result[(row - 1) * self.shape + col])
} else {
None
};
let left_tile = if col > 0 {
Some(result[row * self.shape + col - 1])
} else {
None
};
for idx in 0..8 {
result.last_mut().unwrap().1 = idx;
if (row == 0
|| tile.borders[idx].0
== upper_tile.unwrap().0.borders[upper_tile.unwrap().1].2)
&& (col == 0
|| tile.borders[idx].3
== left_tile.unwrap().0.borders[left_tile.unwrap().1].1)
{
if row == self.shape - 1 && col == self.shape - 1 {
return result;
}
let (new_row, new_col) = if col + 1 >= self.shape {
(row + 1, 0)
} else {
(row, col + 1)
};
let ret = self.fits(new_row, new_col, &result);
if !ret.is_empty() {
return ret;
}
}
}
result.pop();
}
vec![]
}
pub fn splice_result(&self, fit_result: &[(&Tile, usize)]) -> Array2<u8> {
let pixels = fit_result[0].0.sub_image.shape()[0];
let mut big_image = Array2::<u8>::zeros((0, self.shape * pixels));
for row in 0..self.shape {
let mut row_image = Array2::<u8>::zeros((pixels, 0));
for col in 0..self.shape {
let result = fit_result[row * self.shape + col];
row_image = concatenate![Axis(1), row_image, result.0.get_sub_image(result.1)];
}
big_image = concatenate![Axis(0), big_image, row_image];
}
big_image
}
}
pub fn part1_solution(fit_result: &[(&Tile, usize)]) -> usize {
let shape = (fit_result.len() as f64).sqrt() as usize;
let corner_idx = &[0, shape - 1, shape * (shape - 1), shape * shape - 1];
fit_result
.iter()
.enumerate()
.filter(|(idx, _)| corner_idx.contains(idx))
.map(|(_, (t, _))| t.tile_id)
.product()
}
lazy_static! {
static ref MONSTER: Array2<u8> = unsafe {
Array2::from_shape_vec_unchecked(
(3, 20),
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0,
1, 0, 0, 0,
],
)
};
}
fn find_all_monsters(image: &Array2<u8>) -> Vec<(usize, usize)> {
let shape = image.shape()[0];
let mut found = vec![];
for row in 0..=shape - MONSTER.shape()[0] {
for col in 0..=shape - MONSTER.shape()[1] {
if &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) & &MONSTER.slice(s![.., ..])
== MONSTER.slice(s![.., ..])
{
found.push((row, col));
}
}
}
found
}
pub fn part2_solution(big_image: &BigImage, fit_result: &[(&Tile, usize)]) -> usize {
let mut image = big_image.splice_result(fit_result);
let monsters_pos = find_all_monsters(&image);
for (row, col) in monsters_pos {
let region = &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) - &MONSTER.slice(s![.., ..]);
image
.slice_mut(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
])
.assign(&(region));
}
image.iter().map(|x| *x as usize).sum::<usize>()
}
pub fn read_input(input_file: &str) -> Vec<Tile> {
std::fs::read_to_string(input_file)
.unwrap()
.split("\n\n")
.filter(|&b| !b.trim().is_empty())
.map(|b| Tile::new(b))
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::Array;
#[test]
fn test_matrix_transforms() {
let m = Array::range(1., 5., 1.).into_shape((2, 2)).unwrap();
assert_eq!(m.original(), ndarray::arr2(&[[1., 2.], [3., 4.]]));
assert_eq!(m.rot90_clockwise(), ndarray::arr2(&[[3., 1.], [4., 2.]]));
assert_eq!(m.rot180_clockwise(), ndarray::arr2(&[[4., 3.], [2., 1.]]));
assert_eq!(m.rot270_clockwise(), ndarray::arr2(&[[2., 4.], [1., 3.]]));
assert_eq!(m.flip_vertical(), ndarray::arr2(&[[3., 4.], [1., 2.]]));
assert_eq!(m.flip_horizontal(), ndarray::arr2(&[[2., 1.], [4., 3.]]));
assert_eq!(m.flip_main_diagonal(), ndarray::arr2(&[[1., 3.], [2., 4.]]));
assert_eq!(m.flip_sub_diagonal(), ndarray::arr2(&[[4., 2.], [3., 1.]]));
}
#[test]
fn test_part1() {
let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part1_solution(&result), 20899048083289);
}
#[test]
fn test_part2() {
let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part2_solution(&test_image, &result), 273);
}
}
| {
let lines = data
.split('\n')
.map(|s| s.trim_end().to_string())
.collect::<Vec<_>>();
let shape = lines[1].len() - 2;
let tile_id = parse_tile::parse_tile_id(&lines[0]).unwrap();
let (top, top_rev) = parse_tile::parse_border(&lines[1]).unwrap();
let left_col = lines
.iter()
.skip(1)
.map(|s| s.chars().next().unwrap())
.collect::<String>();
let (left, left_rev) = parse_tile::parse_border(&left_col).unwrap();
let right_col = lines
.iter()
.skip(1)
.map(|s| s.chars().last().unwrap())
.collect::<String>();
let (right, right_rev) = parse_tile::parse_border(&right_col).unwrap();
let (bottom, bottom_rev) = parse_tile::parse_border(&lines[lines.len() - 1]).unwrap();
let mut sub_image = unsafe { Array2::<u8>::uninitialized((shape, shape)) };
for (i, row) in lines.iter().enumerate().skip(2).take(shape) {
let row_pixels = parse_tile::parse_sub_image(&row[1..row.len() - 1]).unwrap();
sub_image.row_mut(i - 2).assign(&row_pixels);
}
Self {
tile_id,
sub_image,
borders: vec![
(top, right, bottom, left), // original sub image
(left_rev, top, right_rev, bottom), // rotate 90 degree clockwise
(bottom_rev, left_rev, top_rev, right_rev), // rotate 180 degree clockwise
(right, bottom_rev, left, top_rev), // rotate 270 degree clockwise
(bottom, right_rev, top, left_rev), // flip vertical
(top_rev, left, bottom_rev, right), // flip horizontal
(left, bottom, right, top), // flip along main diagonal
(right_rev, top_rev, left_rev, bottom_rev), // flip along sub diagonal
],
}
} | identifier_body |
tasty_trade_importer.py | import os
import re
import copy
import math
from collections import OrderedDict
import arrow
import csv
# import a tasty_trade csv, and emit 1 row for every sell with the
# transaction_type(money_transfer,trade),account,date, symbol, quantity, *stock (0/1), *option (0/1), credit/debit (including fees)
# only works for daily options right now
def create_formatted_csv():
read_csvs()
def read_csvs():
path = '../../Downloads/tt/'
for filename in os.listdir(path):
if '.csv' in filename:
with open(os.path.join(path, filename)) as csvfile:
read_csv(csvfile)
def read_csv(csvfile):
'''assuming the headers are
Date/Time
Transaction Code
Transaction Subcode
Security ID
Symbol
Buy/Sell
Open/Close
Quantity
Expiration Date
Strike
Call/Put
Price
Fees
Amount
Description
Account Reference
'''
'''creating csv with headers
x - transaction_type(money_transfer,trade)
x - account
x - date
x - symbol
x - quantity
x - stock (0/1)
x - option (0/1)
x - p_l (including fees)
'''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.extend(getFormattedRowsForMoneyMoneyMovement(day, money_movement))
# get options/common trades
all_trades = getTradesForDay(day, og_rows, all_trades)
if all_trades:
# write row
# we only write out the p/l when the trade is over (quantity 0)
formatted_rows.extend(getFormattedRowsForTrades(day, all_trades))
# remove finished trades
all_trades = remove_completed_trades(all_trades)
# TODO: persist swing trades for next times
if all_trades:
print('*** these trades are still in progress {}'.format(all_trades))
# output csv
output_formatted_csv(formatted_rows)
print('done')
def copy_rows(csvfile):
reader = csv.DictReader(csvfile)
rows = []
for row in reader:
copied = copy.deepcopy(row)
rows.append(copied)
return rows
def getDays(og_rows):
# get the unique days in the csv
unique_days = set()
for row in og_rows:
mdy = arrow.get(row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
unique_days.add(mdy)
# sort days ascending
arrow_days = [arrow.get(u_d, 'MM/DD/YYYY') for u_d in unique_days]
arrow_days.sort()
string_days = [a_d.format('MM/DD/YYYY') for a_d in arrow_days]
print('found {} trading days'.format(len(unique_days)))
return string_days
def sameDay(mdy, og_row):
og_mdy = arrow.get(og_row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
return mdy == og_mdy
def moneyMovementForDay(day, og_rows):
money_movement = []
# get each money movement event for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
if 'money movement' in row['Transaction Code'].lower():
money_movement.append(getAmount(row))
return money_movement
def getTradesForDay(day, og_rows, trades):
# TODO: support long term swing trades (need a db ;))
# trades = {}
# group by symbol (commons or options)
'''
{
symbol:{
commons: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
options: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
}
}
'''
# calc all trades for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
            # calculate all trades for every symbol
if 'trade' in row['Transaction Code'].lower():
symbol = row['Symbol']
if isOption(row):
# amount with fees
netAmount = amountWithFees(row)
# save option trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['options']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['options']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['options']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['options']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['options']['quantity'] -= int(row['Quantity'])
else:
# amount with fees
netAmount = amountWithFees(row)
# save stock trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['commons']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['commons']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['commons']['quantity'] += int(row['Quantity'])
else: | trades[symbol]['commons']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['commons']['quantity'] -= int(row['Quantity'])
    print('calculated all {} trades for {}'.format(len(trades.items()), day))
return trades
def is_commons_swing_trade(symbol, trade_type):
return trade_type['commons']['quantity'] != 0
def is_options_swing_trade(symbol, trade_type):
return trade_type['options']['quantity'] != 0
def get_swing_trades(swing_trades, trades):
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type) or is_options_swing_trade(symbol, trade_type):
# save most up to date trade info
swing_trades[symbol] = trade_type
return swing_trades
def remove_completed_trades(trades):
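    # A position is complete once its net quantity is back to zero; reset that bucket and
    # drop symbols with nothing open so only swing trades carry over to the next day.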
symbols_to_delete = []
for symbol, trade_type in trades.items():
if not is_commons_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
if not is_options_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
if is_trade_type_empty(trade_type):
symbols_to_delete.append(symbol)
for symbol in symbols_to_delete:
trades.pop(symbol, None)
return trades
def removeSwingTrades(trades):
# remove trades that are not day trades. TODO support it sometime in the future
# the quantity should be 0 if it was a day trade
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Commons: {}. Do it manually******'.format(symbol, trade_type['commons']['quantity']))
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
if is_options_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Options: {}. Do it manually******'.format(symbol, trade_type['options']['quantity']))
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
return trades
def getFormattedRowsForMoneyMoneyMovement(day, money_movement):
formatted_rows = []
for event in money_movement:
formatted_row = {
'transaction_type': 'money_transfer',
'account': None,
'date': day,
'symbol': None,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(event, 2)),
'%': 0
}
formatted_rows.append(formatted_row)
return formatted_rows
def getFormattedRowsForTrades(day, trades):
formatted_rows = []
# output rows for each trade symbol p/l for day in trades
# for all options
for symbol, trade_type in trades.items():
# print('{} {} {}'.format(symbol, trade_type['options']['quantity'], trade_type['options']['quantity']))
if trade_type['options']['quantity'] == 0 and trade_type['options']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['options']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['options']['amount_bought'], trade_type['options']['amount_sold'])
}
formatted_rows.append(formatted_row)
# for all commons
for symbol, trade_type in trades.items():
if trade_type['commons']['quantity'] == 0 and trade_type['commons']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['commons']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['commons']['amount_bought'], trade_type['commons']['amount_sold'])
}
formatted_rows.append(formatted_row)
return formatted_rows
def calculatePercentGain(bought, sold):
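    # Percent return relative to the total amount spent; assumes `bought` is non-zero.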
percent_gain = ((sold - bought)/bought) * 100
return str(round(percent_gain, 2))
def create_trade_dict():
trades = {
'commons': create_emtpy_common_or_options_dict(),
'options': create_emtpy_common_or_options_dict()
}
return trades
def create_emtpy_common_or_options_dict():
shell = {
'net_amount': 0,
'quantity': 0,
'amount_bought': 0,
'amount_sold': 0
}
return shell
def is_trade_type_empty(trade_type):
# common_zeros = [ value for key, value in trade_type['commons'].items() if value == 0]
# option_zeros = [ value for key, value in trade_type['options'].items() if value == 0]
# common_zeros.extend(option_zeros)
# return len(common_zeros) == 0
return trade_type['commons']['quantity'] == 0 and trade_type['options']['quantity'] == 0
# ======== Row funcs ===========
def amountWithFees(og_row):
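    # Net cash flow for a fill: |Amount| plus Fees, negated for purchases
    # (the export reports purchases as negative Amounts).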
fees = float(og_row['Fees'])
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price) + fees
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def getAmount(og_row):
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price)
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def isPurchase(og_row):
# negative is purchase
return getAmount(og_row) < 0
def isOption(og_row):
# is option trade?
if not og_row['Call/Put']:
return False
return True
def isCallOption(og_row):
if isOption(og_row):
if 'c' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def isPutOption(og_row):
if isOption(og_row):
if 'p' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def output_formatted_csv(formatted_rows):
print('...creating csv')
with open('formatted_tt.csv', 'w', newline='') as out_csvfile:
fieldnames = ['transaction_type','account','date','symbol','quantity','stock','option','p_l', '%']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames)
writer.writeheader()
for formatted in formatted_rows:
writer.writerow(formatted)
print('finished writing csv')
'''create a csv with
'''
# save deposits
# save withdrawals
# save balance adjustments = comes out of account andrew
if __name__ == "__main__":
create_formatted_csv() | random_line_split |
|
tasty_trade_importer.py | import os
import re
import copy
import math
from collections import OrderedDict
import arrow
import csv
# import a tasty_trade csv, and emit 1 row for every sell with the
# transaction_type(money_transfer,trade),account,date, symbol, quantity, *stock (0/1), *option (0/1), credit/debit (including fees)
# only works for daily options right now
def create_formatted_csv():
read_csvs()
def read_csvs():
path = '../../Downloads/tt/'
for filename in os.listdir(path):
if '.csv' in filename:
with open(os.path.join(path, filename)) as csvfile:
read_csv(csvfile)
def read_csv(csvfile):
'''assuming the headers are
Date/Time
Transaction Code
Transaction Subcode
Security ID
Symbol
Buy/Sell
Open/Close
Quantity
Expiration Date
Strike
Call/Put
Price
Fees
Amount
Description
Account Reference
'''
'''creating csv with headers
x - transaction_type(money_transfer,trade)
x - account
x - date
x - symbol
x - quantity
x - stock (0/1)
x - option (0/1)
x - p_l (including fees)
'''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.extend(getFormattedRowsForMoneyMoneyMovement(day, money_movement))
# get options/common trades
all_trades = getTradesForDay(day, og_rows, all_trades)
if all_trades:
# write row
# we only write out the p/l when the trade is over (quantity 0)
formatted_rows.extend(getFormattedRowsForTrades(day, all_trades))
# remove finished trades
all_trades = remove_completed_trades(all_trades)
# TODO: persist swing trades for next times
if all_trades:
print('*** these trades are still in progress {}'.format(all_trades))
# output csv
output_formatted_csv(formatted_rows)
print('done')
def copy_rows(csvfile):
reader = csv.DictReader(csvfile)
rows = []
for row in reader:
copied = copy.deepcopy(row)
rows.append(copied)
return rows
def getDays(og_rows):
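    # Collect the unique trade dates, sort them chronologically via arrow, and return them
    # as MM/DD/YYYY strings.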
# get the unique days in the csv
unique_days = set()
for row in og_rows:
mdy = arrow.get(row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
unique_days.add(mdy)
# sort days ascending
arrow_days = [arrow.get(u_d, 'MM/DD/YYYY') for u_d in unique_days]
arrow_days.sort()
string_days = [a_d.format('MM/DD/YYYY') for a_d in arrow_days]
print('found {} trading days'.format(len(unique_days)))
return string_days
def sameDay(mdy, og_row):
og_mdy = arrow.get(og_row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
return mdy == og_mdy
def moneyMovementForDay(day, og_rows):
money_movement = []
# get each money movement event for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
if 'money movement' in row['Transaction Code'].lower():
money_movement.append(getAmount(row))
return money_movement
def getTradesForDay(day, og_rows, trades):
# TODO: support long term swing trades (need a db ;))
# trades = {}
# group by symbol (commons or options)
'''
{
symbol:{
commons: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
options: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
}
}
'''
# calc all trades for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
            # calculate all trades for every symbol
if 'trade' in row['Transaction Code'].lower():
symbol = row['Symbol']
if isOption(row):
# amount with fees
netAmount = amountWithFees(row)
# save option trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['options']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['options']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['options']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['options']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['options']['quantity'] -= int(row['Quantity'])
else:
# amount with fees
netAmount = amountWithFees(row)
# save stock trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['commons']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['commons']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['commons']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['commons']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['commons']['quantity'] -= int(row['Quantity'])
    print('calculated all {} trades for {}'.format(len(trades.items()), day))
return trades
def is_commons_swing_trade(symbol, trade_type):
return trade_type['commons']['quantity'] != 0
def is_options_swing_trade(symbol, trade_type):
return trade_type['options']['quantity'] != 0
def get_swing_trades(swing_trades, trades):
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type) or is_options_swing_trade(symbol, trade_type):
# save most up to date trade info
swing_trades[symbol] = trade_type
return swing_trades
def remove_completed_trades(trades):
symbols_to_delete = []
for symbol, trade_type in trades.items():
if not is_commons_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
if not is_options_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
if is_trade_type_empty(trade_type):
symbols_to_delete.append(symbol)
for symbol in symbols_to_delete:
trades.pop(symbol, None)
return trades
def removeSwingTrades(trades):
# remove trades that are not day trades. TODO support it sometime in the future
# the quantity should be 0 if it was a day trade
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Commons: {}. Do it manually******'.format(symbol, trade_type['commons']['quantity']))
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
if is_options_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Options: {}. Do it manually******'.format(symbol, trade_type['options']['quantity']))
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
return trades
def getFormattedRowsForMoneyMoneyMovement(day, money_movement):
formatted_rows = []
for event in money_movement:
formatted_row = {
'transaction_type': 'money_transfer',
'account': None,
'date': day,
'symbol': None,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(event, 2)),
'%': 0
}
formatted_rows.append(formatted_row)
return formatted_rows
def getFormattedRowsForTrades(day, trades):
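    # Emit a row only for positions that closed (quantity back to zero with a non-zero
    # net amount): options first, then commons.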
formatted_rows = []
# output rows for each trade symbol p/l for day in trades
# for all options
for symbol, trade_type in trades.items():
# print('{} {} {}'.format(symbol, trade_type['options']['quantity'], trade_type['options']['quantity']))
if trade_type['options']['quantity'] == 0 and trade_type['options']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['options']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['options']['amount_bought'], trade_type['options']['amount_sold'])
}
formatted_rows.append(formatted_row)
# for all commons
for symbol, trade_type in trades.items():
if trade_type['commons']['quantity'] == 0 and trade_type['commons']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['commons']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['commons']['amount_bought'], trade_type['commons']['amount_sold'])
}
formatted_rows.append(formatted_row)
return formatted_rows
def calculatePercentGain(bought, sold):
percent_gain = ((sold - bought)/bought) * 100
return str(round(percent_gain, 2))
def create_trade_dict():
trades = {
'commons': create_emtpy_common_or_options_dict(),
'options': create_emtpy_common_or_options_dict()
}
return trades
def create_emtpy_common_or_options_dict():
shell = {
'net_amount': 0,
'quantity': 0,
'amount_bought': 0,
'amount_sold': 0
}
return shell
def is_trade_type_empty(trade_type):
# common_zeros = [ value for key, value in trade_type['commons'].items() if value == 0]
# option_zeros = [ value for key, value in trade_type['options'].items() if value == 0]
# common_zeros.extend(option_zeros)
# return len(common_zeros) == 0
return trade_type['commons']['quantity'] == 0 and trade_type['options']['quantity'] == 0
# ======== Row funcs ===========
def amountWithFees(og_row):
fees = float(og_row['Fees'])
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price) + fees
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def getAmount(og_row):
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price)
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def isPurchase(og_row):
# negative is purchase
return getAmount(og_row) < 0
def isOption(og_row):
# is option trade?
if not og_row['Call/Put']:
return False
return True
def isCallOption(og_row):
if isOption(og_row):
if 'c' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def isPutOption(og_row):
if isOption(og_row):
if 'p' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def output_formatted_csv(formatted_rows):
|
if __name__ == "__main__":
create_formatted_csv()
| print('...creating csv')
with open('formatted_tt.csv', 'w', newline='') as out_csvfile:
fieldnames = ['transaction_type','account','date','symbol','quantity','stock','option','p_l', '%']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames)
writer.writeheader()
for formatted in formatted_rows:
writer.writerow(formatted)
print('finished writing csv')
'''create a csv with
'''
# save deposits
# save withdrawals
# save balance adustments = comes out of account andrew | identifier_body |
tasty_trade_importer.py | import os
import re
import copy
import math
from collections import OrderedDict
import arrow
import csv
# import a tasty_trade csv, and emit 1 row for every sell with the
# transaction_type(money_transfer,trade),account,date, symbol, quantity, *stock (0/1), *option (0/1), credit/debit (including fees)
# only works for daily options right now
def create_formatted_csv():
read_csvs()
def read_csvs():
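    # Scan the hard-coded download folder and feed every CSV export it finds to read_csv().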
path = '../../Downloads/tt/'
for filename in os.listdir(path):
if '.csv' in filename:
with open(os.path.join(path, filename)) as csvfile:
read_csv(csvfile)
def read_csv(csvfile):
'''assuming the headers are
Date/Time
Transaction Code
Transaction Subcode
Security ID
Symbol
Buy/Sell
Open/Close
Quantity
Expiration Date
Strike
Call/Put
Price
Fees
Amount
Description
Account Reference
'''
'''creating csv with headers
x - transaction_type(money_transfer,trade)
x - account
x - date
x - symbol
x - quantity
x - stock (0/1)
x - option (0/1)
x - p_l (including fees)
'''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.extend(getFormattedRowsForMoneyMoneyMovement(day, money_movement))
# get options/common trades
all_trades = getTradesForDay(day, og_rows, all_trades)
if all_trades:
# write row
# we only write out the p/l when the trade is over (quantity 0)
formatted_rows.extend(getFormattedRowsForTrades(day, all_trades))
# remove finished trades
all_trades = remove_completed_trades(all_trades)
# TODO: persist swing trades for next times
if all_trades:
print('*** these trades are still in progress {}'.format(all_trades))
# output csv
output_formatted_csv(formatted_rows)
print('done')
def copy_rows(csvfile):
reader = csv.DictReader(csvfile)
rows = []
for row in reader:
copied = copy.deepcopy(row)
rows.append(copied)
return rows
def getDays(og_rows):
# get the unique days in the csv
unique_days = set()
for row in og_rows:
mdy = arrow.get(row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
unique_days.add(mdy)
# sort days ascending
arrow_days = [arrow.get(u_d, 'MM/DD/YYYY') for u_d in unique_days]
arrow_days.sort()
string_days = [a_d.format('MM/DD/YYYY') for a_d in arrow_days]
print('found {} trading days'.format(len(unique_days)))
return string_days
def sameDay(mdy, og_row):
og_mdy = arrow.get(og_row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
return mdy == og_mdy
def moneyMovementForDay(day, og_rows):
money_movement = []
# get each money movement event for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
if 'money movement' in row['Transaction Code'].lower():
money_movement.append(getAmount(row))
return money_movement
def getTradesForDay(day, og_rows, trades):
# TODO: support long term swing trades (need a db ;))
# trades = {}
# group by symbol (commons or options)
'''
{
symbol:{
commons: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
options: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
}
}
'''
# calc all trades for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
            # calculate all trades for every symbol
if 'trade' in row['Transaction Code'].lower():
symbol = row['Symbol']
if isOption(row):
# amount with fees
netAmount = amountWithFees(row)
# save option trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['options']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['options']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['options']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['options']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['options']['quantity'] -= int(row['Quantity'])
else:
# amount with fees
|
    print('calculated all {} trades for {}'.format(len(trades.items()), day))
return trades
def is_commons_swing_trade(symbol, trade_type):
return trade_type['commons']['quantity'] != 0
def is_options_swing_trade(symbol, trade_type):
return trade_type['options']['quantity'] != 0
def get_swing_trades(swing_trades, trades):
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type) or is_options_swing_trade(symbol, trade_type):
# save most up to date trade info
swing_trades[symbol] = trade_type
return swing_trades
def remove_completed_trades(trades):
symbols_to_delete = []
for symbol, trade_type in trades.items():
if not is_commons_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
if not is_options_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
if is_trade_type_empty(trade_type):
symbols_to_delete.append(symbol)
for symbol in symbols_to_delete:
trades.pop(symbol, None)
return trades
def removeSwingTrades(trades):
# remove trades that are not day trades. TODO support it sometime in the future
# the quantity should be 0 if it was a day trade
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Commons: {}. Do it manually******'.format(symbol, trade_type['commons']['quantity']))
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
if is_options_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Options: {}. Do it manually******'.format(symbol, trade_type['options']['quantity']))
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
return trades
def getFormattedRowsForMoneyMoneyMovement(day, money_movement):
formatted_rows = []
for event in money_movement:
formatted_row = {
'transaction_type': 'money_transfer',
'account': None,
'date': day,
'symbol': None,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(event, 2)),
'%': 0
}
formatted_rows.append(formatted_row)
return formatted_rows
def getFormattedRowsForTrades(day, trades):
formatted_rows = []
# output rows for each trade symbol p/l for day in trades
# for all options
for symbol, trade_type in trades.items():
# print('{} {} {}'.format(symbol, trade_type['options']['quantity'], trade_type['options']['quantity']))
if trade_type['options']['quantity'] == 0 and trade_type['options']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['options']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['options']['amount_bought'], trade_type['options']['amount_sold'])
}
formatted_rows.append(formatted_row)
# for all commons
for symbol, trade_type in trades.items():
if trade_type['commons']['quantity'] == 0 and trade_type['commons']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['commons']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['commons']['amount_bought'], trade_type['commons']['amount_sold'])
}
formatted_rows.append(formatted_row)
return formatted_rows
def calculatePercentGain(bought, sold):
percent_gain = ((sold - bought)/bought) * 100
return str(round(percent_gain, 2))
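# Illustrative example (values are hypothetical, not from the source CSV):
# calculatePercentGain(1000, 1100) returns '10.0', i.e. ((1100 - 1000) / 1000) * 100
# rounded to two decimals. Note the helper assumes bought is non-zero; a symbol with
# no recorded buys would raise ZeroDivisionError here.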
def create_trade_dict():
trades = {
'commons': create_emtpy_common_or_options_dict(),
'options': create_emtpy_common_or_options_dict()
}
return trades
def create_emtpy_common_or_options_dict():
shell = {
'net_amount': 0,
'quantity': 0,
'amount_bought': 0,
'amount_sold': 0
}
return shell
def is_trade_type_empty(trade_type):
# common_zeros = [ value for key, value in trade_type['commons'].items() if value == 0]
# option_zeros = [ value for key, value in trade_type['options'].items() if value == 0]
# common_zeros.extend(option_zeros)
# return len(common_zeros) == 0
return trade_type['commons']['quantity'] == 0 and trade_type['options']['quantity'] == 0
# ======== Row funcs ===========
def amountWithFees(og_row):
fees = float(og_row['Fees'])
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price) + fees
# neg val if purchased
if isPurchase:
return amount * -1
return amount
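# Illustrative example (hypothetical row values): with Amount='-100.00' and Fees='1.50'
# the row is treated as a purchase and amountWithFees returns -101.5; with Amount='100.00'
# and Fees='1.50' it returns 101.5. Fees are folded into the absolute value in both cases.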
def getAmount(og_row):
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price)
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def isPurchase(og_row):
# negative is purchase
return getAmount(og_row) < 0
def isOption(og_row):
# is option trade?
if not og_row['Call/Put']:
return False
return True
def isCallOption(og_row):
if isOption(og_row):
if 'c' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def isPutOption(og_row):
if isOption(og_row):
if 'p' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def output_formatted_csv(formatted_rows):
print('...creating csv')
with open('formatted_tt.csv', 'w', newline='') as out_csvfile:
fieldnames = ['transaction_type','account','date','symbol','quantity','stock','option','p_l', '%']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames)
writer.writeheader()
for formatted in formatted_rows:
writer.writerow(formatted)
print('finished writing csv')
'''create a csv with
'''
# save deposits
# save withdrawals
# save balance adjustments = comes out of account andrew
if __name__ == "__main__":
create_formatted_csv()
| netAmount = amountWithFees(row)
# save stock trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['commons']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['commons']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['commons']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['commons']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['commons']['quantity'] -= int(row['Quantity']) | conditional_block |
tasty_trade_importer.py | import os
import re
import copy
import math
from collections import OrderedDict
import arrow
import csv
# import a tasty_trade csv, and emit 1 row for every sell with the
# transaction_type(money_transfer,trade),account,date, symbol, quantity, *stock (0/1), *option (0/1), credit/debit (including fees)
# only works for daily options right now
def create_formatted_csv():
read_csvs()
def read_csvs():
path = '../../Downloads/tt/'
for filename in os.listdir(path):
if '.csv' in filename:
with open(os.path.join(path, filename)) as csvfile:
read_csv(csvfile)
def read_csv(csvfile):
'''assuming the headers are
Date/Time
Transaction Code
Transaction Subcode
Security ID
Symbol
Buy/Sell
Open/Close
Quantity
Expiration Date
Strike
Call/Put
Price
Fees
Amount
Description
Account Reference
'''
'''creating csv with headers
x - transaction_type(money_transfer,trade)
x - account
x - date
x - symbol
x - quantity
x - stock (0/1)
x - option (0/1)
x - p_l (including fees)
'''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.extend(getFormattedRowsForMoneyMoneyMovement(day, money_movement))
# get options/common trades
all_trades = getTradesForDay(day, og_rows, all_trades)
if all_trades:
# write row
# we only write out the p/l when the trade is over (quantity 0)
formatted_rows.extend(getFormattedRowsForTrades(day, all_trades))
# remove finished trades
all_trades = remove_completed_trades(all_trades)
# TODO: persist swing trades for future runs
if all_trades:
print('*** these trades are still in progress {}'.format(all_trades))
# output csv
output_formatted_csv(formatted_rows)
print('done')
def copy_rows(csvfile):
reader = csv.DictReader(csvfile)
rows = []
for row in reader:
copied = copy.deepcopy(row)
rows.append(copied)
return rows
def getDays(og_rows):
# get the unique days in the csv
unique_days = set()
for row in og_rows:
mdy = arrow.get(row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
unique_days.add(mdy)
# sort days ascending
arrow_days = [arrow.get(u_d, 'MM/DD/YYYY') for u_d in unique_days]
arrow_days.sort()
string_days = [a_d.format('MM/DD/YYYY') for a_d in arrow_days]
print('found {} trading days'.format(len(unique_days)))
return string_days
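# Illustrative example (hypothetical rows): Date/Time values '03/02/2015 10:15 AM' and
# '03/02/2015 2:45 PM' collapse to the single day '03/02/2015', and the days come back
# sorted ascending, e.g. ['02/27/2015', '03/02/2015'].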
def sameDay(mdy, og_row):
og_mdy = arrow.get(og_row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
return mdy == og_mdy
def moneyMovementForDay(day, og_rows):
money_movement = []
# get each money movement event for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
if 'money movement' in row['Transaction Code'].lower():
money_movement.append(getAmount(row))
return money_movement
def getTradesForDay(day, og_rows, trades):
# TODO: support long term swing trades (need a db ;))
# trades = {}
# group by symbol (commons or options)
'''
{
symbol:{
commons: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
options: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
}
}
'''
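# Illustrative instance of the structure above (symbol and numbers are made up):
# {'AAPL': {'commons': {'net_amount': -50.5, 'quantity': 100,
# 'amount_bought': 1050.5, 'amount_sold': 1000.0},
# 'options': {'net_amount': 0, 'quantity': 0,
# 'amount_bought': 0, 'amount_sold': 0}}}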
# calc all trades for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
# calculate all trades for every symbol
if 'trade' in row['Transaction Code'].lower():
symbol = row['Symbol']
if isOption(row):
# amount with fees
netAmount = amountWithFees(row)
# save option trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['options']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['options']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['options']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['options']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['options']['quantity'] -= int(row['Quantity'])
else:
# amount with fees
netAmount = amountWithFees(row)
# save stock trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['commons']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['commons']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['commons']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['commons']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['commons']['quantity'] -= int(row['Quantity'])
print('calculated all {} trades for {}'.format(len(trades.items()), day))
return trades
def is_commons_swing_trade(symbol, trade_type):
return trade_type['commons']['quantity'] != 0
def is_options_swing_trade(symbol, trade_type):
return trade_type['options']['quantity'] != 0
def get_swing_trades(swing_trades, trades):
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type) or is_options_swing_trade(symbol, trade_type):
# save most up to date trade info
swing_trades[symbol] = trade_type
return swing_trades
def remove_completed_trades(trades):
symbols_to_delete = []
for symbol, trade_type in trades.items():
if not is_commons_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
if not is_options_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
if is_trade_type_empty(trade_type):
symbols_to_delete.append(symbol)
for symbol in symbols_to_delete:
trades.pop(symbol, None)
return trades
def removeSwingTrades(trades):
# remove trades that are not day trades. TODO support it sometime in the future
# the quantity should be 0 if it was a day trade
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Commons: {}. Do it manually******'.format(symbol, trade_type['commons']['quantity']))
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
if is_options_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Options: {}. Do it manually******'.format(symbol, trade_type['options']['quantity']))
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
return trades
def | (day, money_movement):
formatted_rows = []
for event in money_movement:
formatted_row = {
'transaction_type': 'money_transfer',
'account': None,
'date': day,
'symbol': None,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(event, 2)),
'%': 0
}
formatted_rows.append(formatted_row)
return formatted_rows
def getFormattedRowsForTrades(day, trades):
formatted_rows = []
# output rows for each trade symbol p/l for day in trades
# for all options
for symbol, trade_type in trades.items():
# print('{} {} {}'.format(symbol, trade_type['options']['quantity'], trade_type['options']['quantity']))
if trade_type['options']['quantity'] == 0 and trade_type['options']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['options']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['options']['amount_bought'], trade_type['options']['amount_sold'])
}
formatted_rows.append(formatted_row)
# for all commons
for symbol, trade_type in trades.items():
if trade_type['commons']['quantity'] == 0 and trade_type['commons']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['commons']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['commons']['amount_bought'], trade_type['commons']['amount_sold'])
}
formatted_rows.append(formatted_row)
return formatted_rows
def calculatePercentGain(bought, sold):
percent_gain = ((sold - bought)/bought) * 100
return str(round(percent_gain, 2))
def create_trade_dict():
trades = {
'commons': create_emtpy_common_or_options_dict(),
'options': create_emtpy_common_or_options_dict()
}
return trades
def create_emtpy_common_or_options_dict():
shell = {
'net_amount': 0,
'quantity': 0,
'amount_bought': 0,
'amount_sold': 0
}
return shell
def is_trade_type_empty(trade_type):
# common_zeros = [ value for key, value in trade_type['commons'].items() if value == 0]
# option_zeros = [ value for key, value in trade_type['options'].items() if value == 0]
# common_zeros.extend(option_zeros)
# return len(common_zeros) == 0
return trade_type['commons']['quantity'] == 0 and trade_type['options']['quantity'] == 0
# ======== Row funcs ===========
def amountWithFees(og_row):
fees = float(og_row['Fees'])
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price) + fees
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def getAmount(og_row):
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price)
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def isPurchase(og_row):
# negative is purchase
return getAmount(og_row) < 0
def isOption(og_row):
# is option trade?
if not og_row['Call/Put']:
return False
return True
def isCallOption(og_row):
if isOption(og_row):
if 'c' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def isPutOption(og_row):
if isOption(og_row):
if 'p' in og_row['Call/Put'].lower():
return True
else:
return False
return False
def output_formatted_csv(formatted_rows):
print('...creating csv')
with open('formatted_tt.csv', 'w', newline='') as out_csvfile:
fieldnames = ['transaction_type','account','date','symbol','quantity','stock','option','p_l', '%']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames)
writer.writeheader()
for formatted in formatted_rows:
writer.writerow(formatted)
print('finished writing csv')
'''create a csv with
'''
# save deposits
# save withdrawals
# save balance adjustments = comes out of account andrew
if __name__ == "__main__":
create_formatted_csv()
| getFormattedRowsForMoneyMoneyMovement | identifier_name |
redis_performance_monitor.js | /**
* This script was developed by Guberni and is part of Tellki's Monitoring Solution
*
* March, 2015
*
* Version 1.0
*
* DESCRIPTION: Monitor Redis performance
*
* SYNTAX: node redis_performance_monitor.js <METRIC_STATE> <HOST> <PORT> <PASS_WORD>
*
* EXAMPLE: node redis_performance_monitor.js "1,1,1,1,1,1,1,1,1,1,1,1" "10.10.2.5" "6379" "password"
*
* README:
* <METRIC_STATE> is generated internally by Tellki and it's only used by Tellki default monitors: 1 - metric is on; 0 - metric is off
* <HOST> redis ip address or hostname
* <PORT> redis port
* <PASS_WORD> redis password
*/
var fs = require('fs');
var redis = require('redis');
/**
* Metrics.
*/
var metrics = [];
metrics['ConnectionsPerSecond'] = { id : '1394:Connections/Sec:4', key : 'total_connections_received', ratio : true };
metrics['ConnectedClients'] = { id : '1395:Connected Clients:4', key : 'connected_clients', ratio : false };
metrics['BlockedClients'] = { id : '1396:Blocked Clients:4', key : 'blocked_clients', ratio : false };
metrics['UsedMemory'] = { id : '1397:Used Memory:4', key : 'used_memory', ratio : false };
metrics['CommandsPerSecond'] = { id : '1398:Commands/Sec:4', key : 'total_commands_processed', ratio : true };
metrics['KeyHits'] = { id : '1399:Key Hits:4', key : 'keyspace_hits', ratio : false };
metrics['KeyMisses'] = { id : '1400:Key Misses:4', key : 'keyspace_misses', ratio : false };
metrics['KeysEvicted'] = { id : '1401:Keys Evicted:4', key : 'evicted_keys', ratio : false };
metrics['KeysExpired'] = { id : '1402:Keys Expired:4', key : 'expired_keys', ratio : false };
metrics['BackgroundSaveInProgress'] = { id : '1403:Background Save:9', key : 'rdb_bgsave_in_progress', ratio : false };
metrics['ChangesSinceLastSave'] = { id : '1404:Changes since last Save:4', key : 'rdb_changes_since_last_save', ratio : false };
metrics['ConnectedSlaves'] = { id : '1405:Connected Slaves:4', key : 'connected_slaves', ratio : false };
var tempDir = '/tmp';
var sleepTime = 1000;
/**
* Entry point.
*/
(function() {
try
{
monitorInput(process.argv);
}
catch(err)
{
if(err instanceof InvalidParametersNumberError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
}).call(this);
// ############################################################################
// PARSE INPUT
/**
* Verify number of passed arguments into the script.
*/
function monitorInput(args)
{
args = args.slice(2);
if(args.length != 4)
throw new InvalidParametersNumberError();
monitorInputProcess(args);
}
/**
* Process the passed arguments and send them to monitor execution.
* Receive: arguments to be processed
*/
function monitorInputProcess(args)
{
//<METRIC_STATE>
var metricState = args[0].replace('"', '');
var tokens = metricState.split(',');
var metricsExecution = new Array(7);
for (var i in tokens)
metricsExecution[i] = (tokens[i] === '1');
//<HOST>
var hostname = args[1];
//<PORT>
var port = args[2];
if (port.length === 0)
port = '6379';
// <USER_NAME>
var username = args[3];
username = username.length === 0 ? '' : username;
username = username === '""' ? '' : username;
if (username.length === 1 && username === '"')
username = '';
// <PASS_WORD>
var passwd = args[3];
passwd = passwd.length === 0 ? '' : passwd;
passwd = passwd === '""' ? '' : passwd;
if (passwd.length === 1 && passwd === '"')
passwd = '';
// Create request object to be executed.
var request = new Object();
request.checkMetrics = metricsExecution;
request.hostname = hostname;
request.port = port;
request.passwd = passwd;
// Call monitor.
monitorRedis(request);
}
// ############################################################################
// GET METRICS
/**
* Retrieve metrics information
* Receive: object request containing configuration
*
* Connects to Redis to retrieve data
* Receive:
* - request: object containing request configuration
*/
function monitorRedis(request)
{
var metricsObj = [];
var client = redis.createClient(request.port, request.hostname, {});
if (request.passwd !== '')
{
client.auth(request.passwd);
}
client.on('connect', function() {
processInfo(client, metricsObj, request);
});
client.on('error', function (err) {
if (err !== undefined && (err.message.indexOf('NOAUTH') != -1 || err.message.indexOf('invalid password') != -1))
{
client.quit();
errorHandler(new InvalidAuthenticationError());
}
if (err !== undefined && (err.message.indexOf('ENETUNREACH') != -1 || err.message.indexOf('ECONNREFUSED') != -1))
{
client.quit();
errorHandler(new UnknownHostError());
}
errorHandler(err.message);
});
}
/**
* Get metrics from INFO command.
*/
function processInfo(client, metricsObj, request)
{
client.info(function(err, data) {
var data = parseInfo(data);
var jsonString = '[';
var dateTime = new Date().toISOString();
var i = 0;
for(var key in metrics)
{
if (request.checkMetrics[i])
{
var metric = metrics[key];
var val = data[metric.key] + '';
if (key === 'BackgroundSaveInProgress')
val = val === '0' ? 1 : 0;
if (key === 'UsedMemory')
val = parseInt(val, 10) / 1024 / 1024;
jsonString += '{';
jsonString += '"variableName":"' + key + '",';
jsonString += '"metricUUID":"' + metric.id + '",';
jsonString += '"timestamp":"' + dateTime + '",';
jsonString += '"value":"' + val + '"';
jsonString += '},';
}
i++;
}
if(jsonString.length > 1)
jsonString = jsonString.slice(0, jsonString.length - 1);
jsonString += ']';
processDeltas(request, jsonString);
client.quit();
});
}
/**
* Parse INFO command output.
*/
function parseInfo(info)
{
var lines = info.split('\r\n');
var obj = {};
for (var i = 0, l = lines.length; i < l; i++)
{
var line = lines[i];
if (line && line.split)
{
line = line.split(':');
if (line.length > 1)
{
var key = line.shift();
obj[key] = line.join(':');
}
}
}
return obj;
}
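// Illustrative example (assumed INFO fragment, not captured from a live server):
// parseInfo('connected_clients:1\r\nused_memory:1048576\r\n') returns
// { connected_clients: '1', used_memory: '1048576' } -- values stay as strings and
// each line is split on its first ':' only.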
// ############################################################################
// OUTPUT METRICS
/**
* Send metrics to console
* Receive: metrics list to output
*/
function output(metrics)
{
for (var i in metrics)
{
var out = "";
var metric = metrics[i];
out += metric.id;
out += "|";
out += metric.value;
out += "|";
console.log(out);
}
}
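// Illustrative output line (hypothetical value): a Connections/Sec reading of 12.34
// is printed as "1394:Connections/Sec:4|12.34|" -- metric UUID, value, and a trailing
// pipe, one line per metric.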
// ############################################################################
// RATE PROCESSING
/**
* Process performance results
* Receive:
* - request object containing configuration
* - retrieved results
*/
function processDeltas(request, results)
{
var file = getFile(request.hostname, request.port);
var toOutput = [];
if (file)
{
var previousData = JSON.parse(file);
var newData = JSON.parse(results);
for(var i = 0; i < newData.length; i++)
{
var endMetric = newData[i];
var initMetric = null;
for(var j = 0; j < previousData.length; j++)
{
if(previousData[j].metricUUID === newData[i].metricUUID)
{
initMetric = previousData[j];
break;
}
}
if (initMetric != null)
{
var deltaValue = getDelta(initMetric, endMetric);
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = deltaValue;
toOutput.push(rateMetric);
}
else
{
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = 0;
toOutput.push(rateMetric);
}
}
setFile(request.hostname, request.port, results);
for (var m = 0; m < toOutput.length; m++)
{
for (var z = 0; z < newData.length; z++)
{
var systemMetric = metrics[newData[z].variableName];
if (systemMetric.ratio === false && newData[z].metricUUID === toOutput[m].id)
{
toOutput[m].value = newData[z].value;
break;
}
}
}
output(toOutput)
}
else
{
setFile(request.hostname, request.port, results);
// Execute again.
setTimeout(function() {
monitorInput(process.argv);
}, sleepTime);
}
}
/**
* Calculate ratio metric's value
* Receive:
* - previous value
* - current value
* -
*/
function getDelta(initMetric, endMetric)
{
var deltaValue = 0;
var decimalPlaces = 2;
var date = new Date().toISOString();
if (parseFloat(endMetric.value) < parseFloat(initMetric.value))
{
deltaValue = parseFloat(endMetric.value).toFixed(decimalPlaces);
}
else
{
var elapsedTime = (new Date(endMetric.timestamp).getTime() - new Date(initMetric.timestamp).getTime()) / 1000;
deltaValue = ((parseFloat(endMetric.value) - parseFloat(initMetric.value))/elapsedTime).toFixed(decimalPlaces);
}
return deltaValue;
}
/**
* Get last results if any saved
* Receive:
* - hostname or ip address
* - port
*/
function getFile(hostname, port)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
try
{
fs.readdirSync(dirPath);
var file = fs.readFileSync(filePath, 'utf8');
if (file.toString('utf8').trim())
{
return file.toString('utf8').trim();
}
else
{
return null;
}
}
catch(e)
{
return null;
}
}
/**
* Save current metrics values to be used to calculate ratios on next runs
* Receive:
* - hostname or ip address
* - port
* - retrieved result
*/
function setFile(hostname, port, json)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
if (!fs.existsSync(dirPath))
|
try
{
fs.writeFileSync(filePath, json);
}
catch(e)
{
var ex = new WriteOnTmpFileError(e.message);
ex.message = e.message;
errorHandler(ex);
}
}
// ############################################################################
// ERROR HANDLER
/**
* Used to handle errors of async functions
* Receive: Error/Exception
*/
function errorHandler(err)
{
if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof MetricNotFoundError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof CreateTmpDirError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof WriteOnTmpFileError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
// ############################################################################
// EXCEPTIONS
/**
* Exceptions used in this script.
*/
function InvalidParametersNumberError() {
this.name = "InvalidParametersNumberError";
this.message = "Wrong number of parameters.";
this.code = 3;
}
InvalidParametersNumberError.prototype = Object.create(Error.prototype);
InvalidParametersNumberError.prototype.constructor = InvalidParametersNumberError;
function InvalidAuthenticationError() {
this.name = "InvalidAuthenticationError";
this.message = "Invalid authentication.";
this.code = 2;
}
InvalidAuthenticationError.prototype = Object.create(Error.prototype);
InvalidAuthenticationError.prototype.constructor = InvalidAuthenticationError;
function UnknownHostError() {
this.name = "UnknownHostError";
this.message = "Unknown host.";
this.code = 27;
}
UnknownHostError.prototype = Object.create(Error.prototype);
UnknownHostError.prototype.constructor = UnknownHostError;
function MetricNotFoundError() {
this.name = "MetricNotFoundError";
this.message = "";
this.code = 8;
}
MetricNotFoundError.prototype = Object.create(Error.prototype);
MetricNotFoundError.prototype.constructor = MetricNotFoundError;
function CreateTmpDirError()
{
this.name = "CreateTmpDirError";
this.message = "";
this.code = 21;
}
CreateTmpDirError.prototype = Object.create(Error.prototype);
CreateTmpDirError.prototype.constructor = CreateTmpDirError;
function WriteOnTmpFileError()
{
this.name = "WriteOnTmpFileError";
this.message = "";
this.code = 22;
}
WriteOnTmpFileError.prototype = Object.create(Error.prototype);
WriteOnTmpFileError.prototype.constructor = WriteOnTmpFileError;
| {
try
{
fs.mkdirSync( __dirname + tempDir);
}
catch(e)
{
var ex = new CreateTmpDirError(e.message);
ex.message = e.message;
errorHandler(ex);
}
} | conditional_block |
redis_performance_monitor.js | /**
* This script was developed by Guberni and is part of Tellki's Monitoring Solution
*
* March, 2015
*
* Version 1.0
*
* DESCRIPTION: Monitor Redis performance
*
* SYNTAX: node redis_performance_monitor.js <METRIC_STATE> <HOST> <PORT> <PASS_WORD>
*
* EXAMPLE: node redis_performance_monitor.js "1,1,1,1,1,1,1,1,1,1,1,1" "10.10.2.5" "6379" "password"
*
* README:
* <METRIC_STATE> is generated internally by Tellki and it's only used by Tellki default monitors: 1 - metric is on; 0 - metric is off
* <HOST> redis ip address or hostname
* <PORT> redis port
* <PASS_WORD> redis password
*/
var fs = require('fs');
var redis = require('redis');
/**
* Metrics.
*/
var metrics = [];
metrics['ConnectionsPerSecond'] = { id : '1394:Connections/Sec:4', key : 'total_connections_received', ratio : true };
metrics['ConnectedClients'] = { id : '1395:Connected Clients:4', key : 'connected_clients', ratio : false };
metrics['BlockedClients'] = { id : '1396:Blocked Clients:4', key : 'blocked_clients', ratio : false };
metrics['UsedMemory'] = { id : '1397:Used Memory:4', key : 'used_memory', ratio : false };
metrics['CommandsPerSecond'] = { id : '1398:Commands/Sec:4', key : 'total_commands_processed', ratio : true };
metrics['KeyHits'] = { id : '1399:Key Hits:4', key : 'keyspace_hits', ratio : false };
metrics['KeyMisses'] = { id : '1400:Key Misses:4', key : 'keyspace_misses', ratio : false };
metrics['KeysEvicted'] = { id : '1401:Keys Evicted:4', key : 'evicted_keys', ratio : false };
metrics['KeysExpired'] = { id : '1402:Keys Expired:4', key : 'expired_keys', ratio : false };
metrics['BackgroundSaveInProgress'] = { id : '1403:Background Save:9', key : 'rdb_bgsave_in_progress', ratio : false };
metrics['ChangesSinceLastSave'] = { id : '1404:Changes since last Save:4', key : 'rdb_changes_since_last_save', ratio : false };
metrics['ConnectedSlaves'] = { id : '1405:Connected Slaves:4', key : 'connected_slaves', ratio : false };
var tempDir = '/tmp';
var sleepTime = 1000;
/**
* Entry point.
*/
(function() {
try
{
monitorInput(process.argv);
}
catch(err)
{
if(err instanceof InvalidParametersNumberError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
}).call(this);
// ############################################################################
// PARSE INPUT
/**
* Verify number of passed arguments into the script.
*/
function monitorInput(args)
{
args = args.slice(2);
if(args.length != 4)
throw new InvalidParametersNumberError();
monitorInputProcess(args);
}
/**
* Process the passed arguments and send them to monitor execution.
* Receive: arguments to be processed
*/
function monitorInputProcess(args)
{
//<METRIC_STATE>
var metricState = args[0].replace('"', '');
var tokens = metricState.split(',');
var metricsExecution = new Array(7);
for (var i in tokens)
metricsExecution[i] = (tokens[i] === '1');
//<HOST>
var hostname = args[1];
//<PORT>
var port = args[2];
if (port.length === 0)
port = '6379';
// <USER_NAME>
var username = args[3];
username = username.length === 0 ? '' : username;
username = username === '""' ? '' : username;
if (username.length === 1 && username === '"')
username = '';
// <PASS_WORD>
var passwd = args[3];
passwd = passwd.length === 0 ? '' : passwd;
passwd = passwd === '""' ? '' : passwd;
if (passwd.length === 1 && passwd === '"')
passwd = '';
// Create request object to be executed.
var request = new Object();
request.checkMetrics = metricsExecution;
request.hostname = hostname;
request.port = port;
request.passwd = passwd;
// Call monitor.
monitorRedis(request);
}
// ############################################################################
// GET METRICS
/**
* Retrieve metrics information
* Receive: object request containing configuration
*
* Connects to Redis to retrieve data
* Receive:
* - request: object containing request configuration
*/
function monitorRedis(request)
{
var metricsObj = [];
var client = redis.createClient(request.port, request.hostname, {});
if (request.passwd !== '')
{
client.auth(request.passwd);
}
client.on('connect', function() {
processInfo(client, metricsObj, request);
});
client.on('error', function (err) {
if (err !== undefined && (err.message.indexOf('NOAUTH') != -1 || err.message.indexOf('invalid password') != -1))
{
client.quit();
errorHandler(new InvalidAuthenticationError());
}
if (err !== undefined && (err.message.indexOf('ENETUNREACH') != -1 || err.message.indexOf('ECONNREFUSED') != -1))
{
client.quit();
errorHandler(new UnknownHostError());
}
errorHandler(err.message);
});
}
/**
* Get metrics from INFO command.
*/
function processInfo(client, metricsObj, request)
{
client.info(function(err, data) {
var data = parseInfo(data);
var jsonString = '[';
var dateTime = new Date().toISOString();
var i = 0;
for(var key in metrics)
{
if (request.checkMetrics[i])
{
var metric = metrics[key];
var val = data[metric.key] + '';
if (key === 'BackgroundSaveInProgress')
val = val === '0' ? 1 : 0;
if (key === 'UsedMemory')
val = parseInt(val, 10) / 1024 / 1024;
jsonString += '{';
jsonString += '"variableName":"' + key + '",';
jsonString += '"metricUUID":"' + metric.id + '",';
jsonString += '"timestamp":"' + dateTime + '",';
jsonString += '"value":"' + val + '"';
jsonString += '},';
}
i++;
}
if(jsonString.length > 1)
jsonString = jsonString.slice(0, jsonString.length - 1);
jsonString += ']';
processDeltas(request, jsonString);
client.quit();
});
}
/**
* Parse INFO command output.
*/
function parseInfo(info)
{
var lines = info.split('\r\n');
var obj = {};
for (var i = 0, l = lines.length; i < l; i++)
{
var line = lines[i];
if (line && line.split)
{
line = line.split(':');
if (line.length > 1)
{
var key = line.shift();
obj[key] = line.join(':');
}
}
}
return obj;
}
// ############################################################################
// OUTPUT METRICS
/**
* Send metrics to console
* Receive: metrics list to output
*/
function output(metrics)
{
for (var i in metrics)
{
var out = "";
var metric = metrics[i];
out += metric.id;
out += "|";
out += metric.value;
out += "|";
console.log(out);
}
}
// ############################################################################
// RATE PROCESSING
/**
* Process performance results
* Receive:
* - request object containing configuration
* - retrieved results
*/
function processDeltas(request, results)
{
var file = getFile(request.hostname, request.port);
var toOutput = [];
if (file)
{
var previousData = JSON.parse(file);
var newData = JSON.parse(results);
for(var i = 0; i < newData.length; i++)
{
var endMetric = newData[i];
var initMetric = null;
for(var j = 0; j < previousData.length; j++)
{
if(previousData[j].metricUUID === newData[i].metricUUID)
{
initMetric = previousData[j];
break;
}
}
if (initMetric != null)
{
var deltaValue = getDelta(initMetric, endMetric);
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = deltaValue;
toOutput.push(rateMetric);
}
else
{
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = 0;
toOutput.push(rateMetric);
}
}
setFile(request.hostname, request.port, results);
for (var m = 0; m < toOutput.length; m++)
{
for (var z = 0; z < newData.length; z++)
{
var systemMetric = metrics[newData[z].variableName];
if (systemMetric.ratio === false && newData[z].metricUUID === toOutput[m].id)
{
toOutput[m].value = newData[z].value;
break;
}
}
}
output(toOutput)
}
else
{
setFile(request.hostname, request.port, results);
// Execute again.
setTimeout(function() {
monitorInput(process.argv);
}, sleepTime);
}
}
/**
* Calculate ratio metric's value
* Receive:
* - previous value
* - current value
* -
*/
function getDelta(initMetric, endMetric)
{
var deltaValue = 0;
var decimalPlaces = 2;
var date = new Date().toISOString();
if (parseFloat(endMetric.value) < parseFloat(initMetric.value))
{
deltaValue = parseFloat(endMetric.value).toFixed(decimalPlaces);
}
else
{
var elapsedTime = (new Date(endMetric.timestamp).getTime() - new Date(initMetric.timestamp).getTime()) / 1000;
deltaValue = ((parseFloat(endMetric.value) - parseFloat(initMetric.value))/elapsedTime).toFixed(decimalPlaces);
}
return deltaValue;
}
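// Illustrative example (hypothetical samples): if the previous run stored
// total_commands_processed = 1000 and the current run reads 1600 sixty seconds later,
// getDelta returns ((1600 - 1000) / 60).toFixed(2) = '10.00' commands/sec. When the
// current counter is lower than the stored one (e.g. after a Redis restart), the
// current value itself is returned instead of a negative rate.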
/**
* Get last results if any saved
* Receive:
* - hostname or ip address
* - port
*/
function getFile(hostname, port)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
try
{
fs.readdirSync(dirPath);
var file = fs.readFileSync(filePath, 'utf8');
if (file.toString('utf8').trim())
{
return file.toString('utf8').trim();
}
else
{
return null;
}
}
catch(e)
{
return null;
}
}
/**
* Save current metrics values to be used to calculate ratios on next runs
* Receive:
* - hostname or ip address
* - port
* - retrieved result
*/
function setFile(hostname, port, json)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
if (!fs.existsSync(dirPath))
{
try
{
fs.mkdirSync( __dirname + tempDir);
}
catch(e)
{
var ex = new CreateTmpDirError(e.message);
ex.message = e.message;
errorHandler(ex);
}
}
try
{
fs.writeFileSync(filePath, json);
}
catch(e)
{
var ex = new WriteOnTmpFileError(e.message);
ex.message = e.message;
errorHandler(ex);
}
}
// ############################################################################
// ERROR HANDLER
/**
* Used to handle errors of async functions
* Receive: Error/Exception
*/
function errorHandler(err)
{
if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof MetricNotFoundError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof CreateTmpDirError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof WriteOnTmpFileError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
// ############################################################################
// EXCEPTIONS
/**
* Exceptions used in this script.
*/
function InvalidParametersNumberError() {
this.name = "InvalidParametersNumberError";
this.message = "Wrong number of parameters.";
this.code = 3;
}
InvalidParametersNumberError.prototype = Object.create(Error.prototype);
InvalidParametersNumberError.prototype.constructor = InvalidParametersNumberError;
function InvalidAuthenticationError() { | this.name = "InvalidAuthenticationError";
this.message = "Invalid authentication.";
this.code = 2;
}
InvalidAuthenticationError.prototype = Object.create(Error.prototype);
InvalidAuthenticationError.prototype.constructor = InvalidAuthenticationError;
function UnknownHostError() {
this.name = "UnknownHostError";
this.message = "Unknown host.";
this.code = 27;
}
UnknownHostError.prototype = Object.create(Error.prototype);
UnknownHostError.prototype.constructor = UnknownHostError;
function MetricNotFoundError() {
this.name = "MetricNotFoundError";
this.message = "";
this.code = 8;
}
MetricNotFoundError.prototype = Object.create(Error.prototype);
MetricNotFoundError.prototype.constructor = MetricNotFoundError;
function CreateTmpDirError()
{
this.name = "CreateTmpDirError";
this.message = "";
this.code = 21;
}
CreateTmpDirError.prototype = Object.create(Error.prototype);
CreateTmpDirError.prototype.constructor = CreateTmpDirError;
function WriteOnTmpFileError()
{
this.name = "WriteOnTmpFileError";
this.message = "";
this.code = 22;
}
WriteOnTmpFileError.prototype = Object.create(Error.prototype);
WriteOnTmpFileError.prototype.constructor = WriteOnTmpFileError; | random_line_split |
|
redis_performance_monitor.js | /**
* This script was developed by Guberni and is part of Tellki's Monitoring Solution
*
* March, 2015
*
* Version 1.0
*
* DESCRIPTION: Monitor Redis performance
*
* SYNTAX: node redis_performance_monitor.js <METRIC_STATE> <HOST> <PORT> <PASS_WORD>
*
* EXAMPLE: node redis_performance_monitor.js "1,1,1,1,1,1,1,1,1,1,1,1" "10.10.2.5" "6379" "password"
*
* README:
* <METRIC_STATE> is generated internally by Tellki and it's only used by Tellki default monitors: 1 - metric is on; 0 - metric is off
* <HOST> redis ip address or hostname
* <PORT> redis port
* <PASS_WORD> redis password
*/
var fs = require('fs');
var redis = require('redis');
/**
* Metrics.
*/
var metrics = [];
metrics['ConnectionsPerSecond'] = { id : '1394:Connections/Sec:4', key : 'total_connections_received', ratio : true };
metrics['ConnectedClients'] = { id : '1395:Connected Clients:4', key : 'connected_clients', ratio : false };
metrics['BlockedClients'] = { id : '1396:Blocked Clients:4', key : 'blocked_clients', ratio : false };
metrics['UsedMemory'] = { id : '1397:Used Memory:4', key : 'used_memory', ratio : false };
metrics['CommandsPerSecond'] = { id : '1398:Commands/Sec:4', key : 'total_commands_processed', ratio : true };
metrics['KeyHits'] = { id : '1399:Key Hits:4', key : 'keyspace_hits', ratio : false };
metrics['KeyMisses'] = { id : '1400:Key Misses:4', key : 'keyspace_misses', ratio : false };
metrics['KeysEvicted'] = { id : '1401:Keys Evicted:4', key : 'evicted_keys', ratio : false };
metrics['KeysExpired'] = { id : '1402:Keys Expired:4', key : 'expired_keys', ratio : false };
metrics['BackgroundSaveInProgress'] = { id : '1403:Background Save:9', key : 'rdb_bgsave_in_progress', ratio : false };
metrics['ChangesSinceLastSave'] = { id : '1404:Changes since last Save:4', key : 'rdb_changes_since_last_save', ratio : false };
metrics['ConnectedSlaves'] = { id : '1405:Connected Slaves:4', key : 'connected_slaves', ratio : false };
var tempDir = '/tmp';
var sleepTime = 1000;
/**
* Entry point.
*/
(function() {
try
{
monitorInput(process.argv);
}
catch(err)
{
if(err instanceof InvalidParametersNumberError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
}).call(this);
// ############################################################################
// PARSE INPUT
/**
* Verify number of passed arguments into the script.
*/
function monitorInput(args)
{
args = args.slice(2);
if(args.length != 4)
throw new InvalidParametersNumberError();
monitorInputProcess(args);
}
/**
* Process the passed arguments and send them to monitor execution.
* Receive: arguments to be processed
*/
function monitorInputProcess(args)
{
//<METRIC_STATE>
var metricState = args[0].replace('"', '');
var tokens = metricState.split(',');
var metricsExecution = new Array(7);
for (var i in tokens)
metricsExecution[i] = (tokens[i] === '1');
//<HOST>
var hostname = args[1];
//<PORT>
var port = args[2];
if (port.length === 0)
port = '6379';
// <USER_NAME>
var username = args[3];
username = username.length === 0 ? '' : username;
username = username === '""' ? '' : username;
if (username.length === 1 && username === '"')
username = '';
// <PASS_WORD>
var passwd = args[3];
passwd = passwd.length === 0 ? '' : passwd;
passwd = passwd === '""' ? '' : passwd;
if (passwd.length === 1 && passwd === '"')
passwd = '';
// Create request object to be executed.
var request = new Object();
request.checkMetrics = metricsExecution;
request.hostname = hostname;
request.port = port;
request.passwd = passwd;
// Call monitor.
monitorRedis(request);
}
// ############################################################################
// GET METRICS
/**
* Retrieve metrics information
* Receive: object request containing configuration
*
* Connects to Redis to retrieve data
* Receive:
* - request: object containing request configuration
*/
function monitorRedis(request)
{
var metricsObj = [];
var client = redis.createClient(request.port, request.hostname, {});
if (request.passwd !== '')
{
client.auth(request.passwd);
}
client.on('connect', function() {
processInfo(client, metricsObj, request);
});
client.on('error', function (err) {
if (err !== undefined && (err.message.indexOf('NOAUTH') != -1 || err.message.indexOf('invalid password') != -1))
{
client.quit();
errorHandler(new InvalidAuthenticationError());
}
if (err !== undefined && (err.message.indexOf('ENETUNREACH') != -1 || err.message.indexOf('ECONNREFUSED') != -1))
{
client.quit();
errorHandler(new UnknownHostError());
}
errorHandler(err.message);
});
}
/**
* Get metrics from INFO command.
*/
function processInfo(client, metricsObj, request)
{
client.info(function(err, data) {
var data = parseInfo(data);
var jsonString = '[';
var dateTime = new Date().toISOString();
var i = 0;
for(var key in metrics)
{
if (request.checkMetrics[i])
{
var metric = metrics[key];
var val = data[metric.key] + '';
if (key === 'BackgroundSaveInProgress')
val = val === '0' ? 1 : 0;
if (key === 'UsedMemory')
val = parseInt(val, 10) / 1024 / 1024;
jsonString += '{';
jsonString += '"variableName":"' + key + '",';
jsonString += '"metricUUID":"' + metric.id + '",';
jsonString += '"timestamp":"' + dateTime + '",';
jsonString += '"value":"' + val + '"';
jsonString += '},';
}
i++;
}
if(jsonString.length > 1)
jsonString = jsonString.slice(0, jsonString.length - 1);
jsonString += ']';
processDeltas(request, jsonString);
client.quit();
});
}
/**
* Parse INFO command output.
*/
function parseInfo(info)
{
var lines = info.split('\r\n');
var obj = {};
for (var i = 0, l = lines.length; i < l; i++)
{
var line = lines[i];
if (line && line.split)
{
line = line.split(':');
if (line.length > 1)
{
var key = line.shift();
obj[key] = line.join(':');
}
}
}
return obj;
}
// ############################################################################
// OUTPUT METRICS
/**
* Send metrics to console
* Receive: metrics list to output
*/
function output(metrics)
{
for (var i in metrics)
{
var out = "";
var metric = metrics[i];
out += metric.id;
out += "|";
out += metric.value;
out += "|";
console.log(out);
}
}
// ############################################################################
// RATE PROCESSING
/**
* Process performance results
* Receive:
* - request object containing configuration
* - retrieved results
*/
function processDeltas(request, results)
{
var file = getFile(request.hostname, request.port);
var toOutput = [];
if (file)
{
var previousData = JSON.parse(file);
var newData = JSON.parse(results);
for(var i = 0; i < newData.length; i++)
{
var endMetric = newData[i];
var initMetric = null;
for(var j = 0; j < previousData.length; j++)
{
if(previousData[j].metricUUID === newData[i].metricUUID)
{
initMetric = previousData[j];
break;
}
}
if (initMetric != null)
{
var deltaValue = getDelta(initMetric, endMetric);
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = deltaValue;
toOutput.push(rateMetric);
}
else
{
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = 0;
toOutput.push(rateMetric);
}
}
setFile(request.hostname, request.port, results);
for (var m = 0; m < toOutput.length; m++)
{
for (var z = 0; z < newData.length; z++)
{
var systemMetric = metrics[newData[z].variableName];
if (systemMetric.ratio === false && newData[z].metricUUID === toOutput[m].id)
{
toOutput[m].value = newData[z].value;
break;
}
}
}
output(toOutput)
}
else
{
setFile(request.hostname, request.port, results);
// Execute again.
setTimeout(function() {
monitorInput(process.argv);
}, sleepTime);
}
}
/**
* Calculate ratio metric's value
* Receive:
* - previous value
* - current value
* -
*/
function getDelta(initMetric, endMetric)
{
var deltaValue = 0;
var decimalPlaces = 2;
var date = new Date().toISOString();
if (parseFloat(endMetric.value) < parseFloat(initMetric.value))
{
deltaValue = parseFloat(endMetric.value).toFixed(decimalPlaces);
}
else
{
var elapsedTime = (new Date(endMetric.timestamp).getTime() - new Date(initMetric.timestamp).getTime()) / 1000;
deltaValue = ((parseFloat(endMetric.value) - parseFloat(initMetric.value))/elapsedTime).toFixed(decimalPlaces);
}
return deltaValue;
}
/**
* Get last results if any saved
* Receive:
* - hostname or ip address
* - port
*/
function getFile(hostname, port)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
try
{
fs.readdirSync(dirPath);
var file = fs.readFileSync(filePath, 'utf8');
if (file.toString('utf8').trim())
{
return file.toString('utf8').trim();
}
else
{
return null;
}
}
catch(e)
{
return null;
}
}
/**
* Save current metrics values to be used to calculate ratios on next runs
* Receive:
* - hostname or ip address
* - port
* - retrieved result
*/
function setFile(hostname, port, json)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
if (!fs.existsSync(dirPath))
{
try
{
fs.mkdirSync( __dirname + tempDir);
}
catch(e)
{
var ex = new CreateTmpDirError(e.message);
ex.message = e.message;
errorHandler(ex);
}
}
try
{
fs.writeFileSync(filePath, json);
}
catch(e)
{
var ex = new WriteOnTmpFileError(e.message);
ex.message = e.message;
errorHandler(ex);
}
}
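// Illustrative path (the install directory is assumed): for host '10.10.2.5' and port
// '6379', with the script living in /opt/monitors, state is written to
// /opt/monitors/tmp/.redis_10.10.2.5_6379.dat and read back by getFile() on the next
// run to turn counters into per-second rates.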
// ############################################################################
// ERROR HANDLER
/**
* Used to handle errors of async functions
* Receive: Error/Exception
*/
function errorHandler(err)
|
// ############################################################################
// EXCEPTIONS
/**
* Exceptions used in this script.
*/
function InvalidParametersNumberError() {
this.name = "InvalidParametersNumberError";
this.message = "Wrong number of parameters.";
this.code = 3;
}
InvalidParametersNumberError.prototype = Object.create(Error.prototype);
InvalidParametersNumberError.prototype.constructor = InvalidParametersNumberError;
function InvalidAuthenticationError() {
this.name = "InvalidAuthenticationError";
this.message = "Invalid authentication.";
this.code = 2;
}
InvalidAuthenticationError.prototype = Object.create(Error.prototype);
InvalidAuthenticationError.prototype.constructor = InvalidAuthenticationError;
function UnknownHostError() {
this.name = "UnknownHostError";
this.message = "Unknown host.";
this.code = 27;
}
UnknownHostError.prototype = Object.create(Error.prototype);
UnknownHostError.prototype.constructor = UnknownHostError;
function MetricNotFoundError() {
this.name = "MetricNotFoundError";
this.message = "";
this.code = 8;
}
MetricNotFoundError.prototype = Object.create(Error.prototype);
MetricNotFoundError.prototype.constructor = MetricNotFoundError;
function CreateTmpDirError()
{
this.name = "CreateTmpDirError";
this.message = "";
this.code = 21;
}
CreateTmpDirError.prototype = Object.create(Error.prototype);
CreateTmpDirError.prototype.constructor = CreateTmpDirError;
function WriteOnTmpFileError()
{
this.name = "WriteOnTmpFileError";
this.message = "";
this.code = 22;
}
WriteOnTmpFileError.prototype = Object.create(Error.prototype);
WriteOnTmpFileError.prototype.constructor = WriteOnTmpFileError;
| {
if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof MetricNotFoundError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof CreateTmpDirError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof WriteOnTmpFileError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
} | identifier_body |
redis_performance_monitor.js | /**
* This script was developed by Guberni and is part of Tellki's Monitoring Solution
*
* March, 2015
*
* Version 1.0
*
* DESCRIPTION: Monitor Redis performance
*
* SYNTAX: node redis_performance_monitor.js <METRIC_STATE> <HOST> <PORT> <PASS_WORD>
*
* EXAMPLE: node redis_performance_monitor.js "1,1,1,1,1,1,1,1,1,1,1,1" "10.10.2.5" "6379" "password"
*
* README:
* <METRIC_STATE> is generated internally by Tellki and it's only used by Tellki default monitors: 1 - metric is on; 0 - metric is off
* <HOST> redis ip address or hostname
* <PORT> redis port
* <PASS_WORD> redis password
*/
var fs = require('fs');
var redis = require('redis');
/**
* Metrics.
*/
var metrics = [];
metrics['ConnectionsPerSecond'] = { id : '1394:Connections/Sec:4', key : 'total_connections_received', ratio : true };
metrics['ConnectedClients'] = { id : '1395:Connected Clients:4', key : 'connected_clients', ratio : false };
metrics['BlockedClients'] = { id : '1396:Blocked Clients:4', key : 'blocked_clients', ratio : false };
metrics['UsedMemory'] = { id : '1397:Used Memory:4', key : 'used_memory', ratio : false };
metrics['CommandsPerSecond'] = { id : '1398:Commands/Sec:4', key : 'total_commands_processed', ratio : true };
metrics['KeyHits'] = { id : '1399:Key Hits:4', key : 'keyspace_hits', ratio : false };
metrics['KeyMisses'] = { id : '1400:Key Misses:4', key : 'keyspace_misses', ratio : false };
metrics['KeysEvicted'] = { id : '1401:Keys Evicted:4', key : 'evicted_keys', ratio : false };
metrics['KeysExpired'] = { id : '1402:Keys Expired:4', key : 'expired_keys', ratio : false };
metrics['BackgroundSaveInProgress'] = { id : '1403:Background Save:9', key : 'rdb_bgsave_in_progress', ratio : false };
metrics['ChangesSinceLastSave'] = { id : '1404:Changes since last Save:4', key : 'rdb_changes_since_last_save', ratio : false };
metrics['ConnectedSlaves'] = { id : '1405:Connected Slaves:4', key : 'connected_slaves', ratio : false };
var tempDir = '/tmp';
var sleepTime = 1000;
/**
* Entry point.
*/
(function() {
try
{
monitorInput(process.argv);
}
catch(err)
{
if(err instanceof InvalidParametersNumberError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
}).call(this);
// ############################################################################
// PARSE INPUT
/**
* Verify number of passed arguments into the script.
*/
function monitorInput(args)
{
args = args.slice(2);
if(args.length != 4)
throw new InvalidParametersNumberError();
monitorInputProcess(args);
}
/**
* Process the passed arguments and send them to monitor execution.
* Receive: arguments to be processed
*/
function monitorInputProcess(args)
{
//<METRIC_STATE>
var metricState = args[0].replace('"', '');
var tokens = metricState.split(',');
var metricsExecution = new Array(7);
for (var i in tokens)
metricsExecution[i] = (tokens[i] === '1');
//<HOST>
var hostname = args[1];
//<PORT>
var port = args[2];
if (port.length === 0)
port = '6379';
// <USER_NAME>
var username = args[3];
username = username.length === 0 ? '' : username;
username = username === '""' ? '' : username;
if (username.length === 1 && username === '"')
username = '';
// <PASS_WORD>
var passwd = args[3];
passwd = passwd.length === 0 ? '' : passwd;
passwd = passwd === '""' ? '' : passwd;
if (passwd.length === 1 && passwd === '"')
passwd = '';
// Create request object to be executed.
var request = new Object();
request.checkMetrics = metricsExecution;
request.hostname = hostname;
request.port = port;
request.passwd = passwd;
// Call monitor.
monitorRedis(request);
}
// ############################################################################
// GET METRICS
/**
* Retrieve metrics information
* Receive: object request containing configuration
*
* Connects to Redis to retrieve data
* Receive:
* - request: object containing request configuration
*/
function monitorRedis(request)
{
var metricsObj = [];
var client = redis.createClient(request.port, request.hostname, {});
if (request.passwd !== '')
{
client.auth(request.passwd);
}
client.on('connect', function() {
processInfo(client, metricsObj, request);
});
client.on('error', function (err) {
if (err !== undefined && (err.message.indexOf('NOAUTH') != -1 || err.message.indexOf('invalid password') != -1))
{
client.quit();
errorHandler(new InvalidAuthenticationError());
}
if (err !== undefined && (err.message.indexOf('ENETUNREACH') != -1 || err.message.indexOf('ECONNREFUSED') != -1))
{
client.quit();
errorHandler(new UnknownHostError());
}
errorHandler(err.message);
});
}
/**
* Get metrics from INFO command.
*/
function processInfo(client, metricsObj, request)
{
client.info(function(err, data) {
var data = parseInfo(data);
var jsonString = '[';
var dateTime = new Date().toISOString();
var i = 0;
for(var key in metrics)
{
if (request.checkMetrics[i])
{
var metric = metrics[key];
var val = data[metric.key] + '';
if (key === 'BackgroundSaveInProgress')
val = val === '0' ? 1 : 0;
if (key === 'UsedMemory')
val = parseInt(val, 10) / 1024 / 1024;
jsonString += '{';
jsonString += '"variableName":"' + key + '",';
jsonString += '"metricUUID":"' + metric.id + '",';
jsonString += '"timestamp":"' + dateTime + '",';
jsonString += '"value":"' + val + '"';
jsonString += '},';
}
i++;
}
if(jsonString.length > 1)
jsonString = jsonString.slice(0, jsonString.length - 1);
jsonString += ']';
processDeltas(request, jsonString);
client.quit();
});
}
/**
* Parse INFO command output.
*/
function parseInfo(info)
{
var lines = info.split('\r\n');
var obj = {};
for (var i = 0, l = lines.length; i < l; i++)
{
var line = lines[i];
if (line && line.split)
{
line = line.split(':');
if (line.length > 1)
{
var key = line.shift();
obj[key] = line.join(':');
}
}
}
return obj;
}
// ############################################################################
// OUTPUT METRICS
/**
* Send metrics to console
* Receive: metrics list to output
*/
function | (metrics)
{
for (var i in metrics)
{
var out = "";
var metric = metrics[i];
out += metric.id;
out += "|";
out += metric.value;
out += "|";
console.log(out);
}
}
// ############################################################################
// RATE PROCESSING
/**
* Process performance results
* Receive:
* - request object containing configuration
* - retrieved results
*/
function processDeltas(request, results)
{
var file = getFile(request.hostname, request.port);
var toOutput = [];
if (file)
{
var previousData = JSON.parse(file);
var newData = JSON.parse(results);
for(var i = 0; i < newData.length; i++)
{
var endMetric = newData[i];
var initMetric = null;
for(var j = 0; j < previousData.length; j++)
{
if(previousData[j].metricUUID === newData[i].metricUUID)
{
initMetric = previousData[j];
break;
}
}
if (initMetric != null)
{
var deltaValue = getDelta(initMetric, endMetric);
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = deltaValue;
toOutput.push(rateMetric);
}
else
{
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = 0;
toOutput.push(rateMetric);
}
}
setFile(request.hostname, request.port, results);
for (var m = 0; m < toOutput.length; m++)
{
for (var z = 0; z < newData.length; z++)
{
var systemMetric = metrics[newData[z].variableName];
if (systemMetric.ratio === false && newData[z].metricUUID === toOutput[m].id)
{
toOutput[m].value = newData[z].value;
break;
}
}
}
output(toOutput)
}
else
{
setFile(request.hostname, request.port, results);
// Execute again.
setTimeout(function() {
monitorInput(process.argv);
}, sleepTime);
}
}
/**
* Calculate ratio metric's value
* Receive:
 * - previous metric (initMetric)
 * - current metric (endMetric)
*/
function getDelta(initMetric, endMetric)
{
var deltaValue = 0;
var decimalPlaces = 2;
var date = new Date().toISOString();
if (parseFloat(endMetric.value) < parseFloat(initMetric.value))
{
deltaValue = parseFloat(endMetric.value).toFixed(decimalPlaces);
}
else
{
var elapsedTime = (new Date(endMetric.timestamp).getTime() - new Date(initMetric.timestamp).getTime()) / 1000;
deltaValue = ((parseFloat(endMetric.value) - parseFloat(initMetric.value))/elapsedTime).toFixed(decimalPlaces);
}
return deltaValue;
}
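/*
 * Example: if the previous sample was value "100" at 10:00:00 and the current
 * sample is value "160" at 10:00:30, elapsedTime is 30 s and the returned
 * rate is ((160 - 100) / 30).toFixed(2) = "2.00". If the counter went
 * backwards (for instance after a Redis restart), the current value itself
 * is returned instead of a rate.
 */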
/**
* Get last results if any saved
* Receive:
* - hostname or ip address
* - port
*/
function getFile(hostname, port)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
try
{
fs.readdirSync(dirPath);
var file = fs.readFileSync(filePath, 'utf8');
if (file.toString('utf8').trim())
{
return file.toString('utf8').trim();
}
else
{
return null;
}
}
catch(e)
{
return null;
}
}
/**
* Save current metrics values to be used to calculate ratios on next runs
* Receive:
* - hostname or ip address
* - port
* - retrieved result
*/
function setFile(hostname, port, json)
{
var dirPath = __dirname + tempDir + "/";
var filePath = dirPath + ".redis_"+ hostname +"_"+ port +".dat";
if (!fs.existsSync(dirPath))
{
try
{
fs.mkdirSync( __dirname + tempDir);
}
catch(e)
{
var ex = new CreateTmpDirError(e.message);
ex.message = e.message;
errorHandler(ex);
}
}
try
{
fs.writeFileSync(filePath, json);
}
catch(e)
{
var ex = new WriteOnTmpFileError(e.message);
ex.message = e.message;
errorHandler(ex);
}
}
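/*
 * Example: for hostname "10.0.0.5" and port 6379 the results are cached in
 * <script dir><tempDir>/.redis_10.0.0.5_6379.dat, the same path that getFile
 * reads on the next run to compute the rates.
 */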
// ############################################################################
// ERROR HANDLER
/**
* Used to handle errors of async functions
* Receive: Error/Exception
*/
function errorHandler(err)
{
if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof MetricNotFoundError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof CreateTmpDirError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof WriteOnTmpFileError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
// ############################################################################
// EXCEPTIONS
/**
* Exceptions used in this script.
*/
function InvalidParametersNumberError() {
this.name = "InvalidParametersNumberError";
this.message = "Wrong number of parameters.";
this.code = 3;
}
InvalidParametersNumberError.prototype = Object.create(Error.prototype);
InvalidParametersNumberError.prototype.constructor = InvalidParametersNumberError;
function InvalidAuthenticationError() {
this.name = "InvalidAuthenticationError";
this.message = "Invalid authentication.";
this.code = 2;
}
InvalidAuthenticationError.prototype = Object.create(Error.prototype);
InvalidAuthenticationError.prototype.constructor = InvalidAuthenticationError;
function UnknownHostError() {
this.name = "UnknownHostError";
this.message = "Unknown host.";
this.code = 27;
}
UnknownHostError.prototype = Object.create(Error.prototype);
UnknownHostError.prototype.constructor = UnknownHostError;
function MetricNotFoundError() {
this.name = "MetricNotFoundError";
this.message = "";
this.code = 8;
}
MetricNotFoundError.prototype = Object.create(Error.prototype);
MetricNotFoundError.prototype.constructor = MetricNotFoundError;
function CreateTmpDirError()
{
this.name = "CreateTmpDirError";
this.message = "";
this.code = 21;
}
CreateTmpDirError.prototype = Object.create(Error.prototype);
CreateTmpDirError.prototype.constructor = CreateTmpDirError;
function WriteOnTmpFileError()
{
this.name = "WriteOnTmpFileError";
this.message = "";
this.code = 22;
}
WriteOnTmpFileError.prototype = Object.create(Error.prototype);
WriteOnTmpFileError.prototype.constructor = WriteOnTmpFileError;
| output | identifier_name |
script.js | //global var to store the state name
var state_name_array=[];
//set to 1 when the user searches by a specific date, because date-specific data has no active element
var no_active=0;
const STATE_CODES={
AN: "Andaman and Nicobar Islands",
AP: "Andhra Pradesh",
AR: "Arunachal Pradesh",
AS: "Assam",
BR: "Bihar",
CH: "Chandigarh",
CT: "Chhattisgarh",
DL: "Delhi",
DN: "Dadra and Nagar Haveli and Daman and Diu",
GA: "Goa",
GJ: "Gujarat",
HP: "Himachal Pradesh",
HR: "Haryana",
JH: "Jharkhand",
JK: "Jammu and Kashmir",
KA: "Karnataka",
KL: "Kerala",
LA: "Ladakh",
LD: "Lakshadweep",
MH: "Maharashtra",
ML: "Meghalaya",
MN: "Manipur",
MP: "Madhya Pradesh",
MZ: "Mizoram",
NL: "Nagaland",
OR: "Odisha",
PB: "Punjab",
PY: "Puducherry",
RJ: "Rajasthan",
SK: "Sikkim",
TG: "Telangana",
TN: "Tamil Nadu",
TR: "Tripura",
UN: "State Unassigned",
UP: "Uttar Pradesh",
UT: "Uttarakhand",
WB: "West Bengal",
};
const API_CLOSED_DATE="2021-02-01";
function getAllData(isstatus=0) {
/*
    get data from the API, which contains "cases_time_series", "statewise" and "tested" details; we only need the statewise details
*/
var alldatalink="https://api.covid19india.org/data.json";
try
{
$.getJSON(alldatalink,function(data){
// storing prev day val for calculation of total value in india
let prev_index=data["cases_time_series"].length;
let prev_confirm=data["cases_time_series"][prev_index-1]["totalconfirmed"];
let prev_recoverd=data["cases_time_series"][prev_index-1]["totalrecovered"];
let prev_deaths=data["cases_time_series"][prev_index-1]["totaldeceased"];
//iterating only statewise object
$.each(data["statewise"],function(index,data){
//storing value for calculation
let date=data["lastupdatedtime"].split(" ")[0];
let active=data["active"];
let confirmed=data["confirmed"];
let recovered=data["recovered"];
let deaths=data["deaths"];
//adding the today status of covid
if(data["state"]==="Total")
{
//call function with isstatus =1
addDataToTable([date,active,confirmed,recovered,deaths],[prev_confirm,prev_recoverd,prev_deaths],1);
}
//pushing the state details to table
else if(!isstatus)
{
                //change the date format
date=date.split("/").reverse().join("-");
//call function to get prev day data
getPrevDayData(date,data["statecode"],[date,active,confirmed,recovered,deaths,data["state"]])
}
})
}).fail(handleError);
}
catch(error)
{
handleError();
}
}
function getPrevDayData(tdate,statecode,data_array) {
let prevdate=tdate.split("-");
//creating date object and month start from zero so minus one
prevdate=new Date(prevdate[0],prevdate[1]-1,prevdate[2]);
//calculating prev day by sub one
prevdate.setDate(prevdate.getDate()-1);
    // toLocaleString typically returns "month/day/year, hr:min:sec AM" in an en-US locale
prevdate=prevdate.toLocaleString();
    //split the date from the time
prevdate=prevdate.split(",")[0];
    // split into [month, day, year]
prevdate=prevdate.split("/");
    //if the month is a single digit, pad it with a leading zero
if(prevdate[0].length==1)
{
prevdate[0]="0"+prevdate[0]
}
    //if the day is a single digit, pad it with a leading zero
if(prevdate[1].length==1)
{
prevdate[1]="0"+prevdate[1]
}
    //reassemble as year-month-day
prevdate=prevdate[2]+"-"+prevdate[0]+"-"+prevdate[1];
let link="https://api.covid19india.org/v3/data-"+prevdate+".json";
//store prev day data in array
let prev_data_array=[]
fetch(link).then((response) => {
return response.json()
}).then((data) =>{
if(data[statecode]!==undefined)
{
if(data[statecode]["total"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["confirmed"])
}
if(data[statecode]["total"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["recovered"])
}
            //if no one died, push zero
if(data[statecode]["total"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["deceased"])
}
            //call the function to add data to the table with isstatus = 0
addDataToTable(data_array,prev_data_array,0);
}
}).catch(handleError);
}
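/*
 * A minimal sketch of the same previous-day computation, assuming UTC dates
 * are acceptable; prevDayISO is a hypothetical helper and is not used by
 * this script.
 *
 * function prevDayISO(tdate) {
 *     var d = new Date(tdate + "T00:00:00Z");
 *     d.setUTCDate(d.getUTCDate() - 1);
 *     return d.toISOString().slice(0, 10); // "YYYY-MM-DD"
 * }
 */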
function addDataToTable(data_array,prev_data_array,isstatus,isold=0)
{
    //retrieve the values from the array
let date=data_array[0];
let active=data_array[1];
let confirmed=data_array[2];
let recovered=data_array[3];
let deaths=data_array[4];
let prev_confirm=prev_data_array[0]
let prev_recoverd=prev_data_array[1]
let prev_deaths=prev_data_array[2]
if(!isold)
{
//calculating the difference
var diff_confirm=(confirmed-prev_confirm);
var diff_deaths=(deaths-prev_deaths);
var diff_recovered=(recovered-prev_recoverd);
}
else
{
var diff_confirm=prev_confirm;
var diff_deaths=prev_deaths;
var diff_recovered =prev_recoverd;
}
// img src
let up_img_src="image/upimg.png";
let down_img_src="image/downimg.png";
let upgreen_img_src="image/upgreenimg.png"
let confirm_pic,active_element,recovered_pic,deaths_pic;
let increases="<br><img src="+up_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
let decreases="<br><img src="+down_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
    //check whether to add to the status panel or to the table
if(isstatus==1)
{
//checking for diff and adding corresponding image for it
if(diff_confirm>0)
{
$("#confirmed-no").append(confirmed+increases+diff_confirm+"</span>");
}
//check if is negative to avoid adding img for zero
else if (diff_confirm<=0)
{
$("#confirmed-no").append(confirmed+decreases+Math.abs(diff_confirm)+"</span>");
}
if(diff_deaths>0)
{
$("#deaths-no").append(deaths+increases+diff_deaths+"</span>");
}
else if(diff_deaths<=0)
{
$("#deaths-no").append(deaths+decreases+Math.abs(diff_deaths)+"</span>");
}
if(diff_recovered>0)
{
//setting uparrow img for recovered
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
$("#recovered-no").append(recovered+increases+diff_recovered+"</span>");
}
else if (diff_recovered<=0)
{
$("#recovered-no").append(recovered+decreases+Math.abs(diff_recovered)+"</span>");
}
$("#active-no").append(active);
$("#status-date").text(date);
}
//add data to table
else
{
if(diff_confirm>0)
{
confirm_pic=confirmed+increases+diff_confirm;
}
else if(diff_confirm<=0)
{
confirm_pic=confirmed+decreases+Math.abs(diff_confirm);
}
if(diff_deaths>0)
{
deaths_pic=deaths+increases+Math.abs(diff_deaths);
}
else if(diff_deaths<=0)
{
deaths_pic=deaths+decreases+Math.abs(diff_deaths);
}
if(diff_recovered>0)
{
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
recovered_pic=recovered+increases+diff_recovered;
}
else if(diff_recovered<=0)
{
recovered_pic=recovered+decreases+Math.abs(diff_recovered);
}
        //check whether the active count is present
if(active)
{
active_element= "<div class='active-state'>Active<br><span id='active-no'>"+active+"</span></div>"
}
else{
active_element="<div></div>";
}
        //retrieve the state name
let state_name=data_array[5]?data_array[5]:"unknown state";
state_name_array.push(state_name.toLowerCase())
let class_name=state_name.split(" ")[0].toLowerCase();
let state_html="<div class='state-div "+class_name+"' > \
<p class='state-name' onclick='goToState(event)'>"+state_name+"<br>"+date+"</p> \
<div class='state-data d-flex flex-wrap justify-content-between' > \
"+active_element+
"<div class='confirmed-state'>Confirmed<br><span id='active-no'>"+confirm_pic+"</span></div>\
<div class='recovered-state'>Recovered<br><span id='active-no'>"+recovered_pic+"</span></div>\
<div class='deaths-state'>Deaths<br><span id='active-no'>"+deaths_pic+"</span></div><br>\
</div>"
$(".state-container").append(state_html);
}
}
function goToState(event)
{
    //grab the full stats text of the sibling element, one value per line
let array=event.target.nextSibling.nextSibling.innerText.split("\n");
debugger;
let tactive=tconfirmed=trecovered=tdeaths=diff_confirm=diff_recovered=diff_deaths=0;
if(no_active)
{
tactive="unknown";
index=2
}
else{
if(array[1])
{
tactive=array[1];
}
index=0;
}
if(array[3-index])
{
tconfirmed=array[3-index];
}
if(array[4-index])
{
diff_confirm=array[4-index];
}
if(array[6-index])
{
trecovered=array[6-index];
}
if(array[7-index])
{
diff_recovered=array[7-index];
}
if(array[9-index])
{
tdeaths=array[9-index];
}
if(array[10-index])
{
diff_deaths=array[10-index];
}
//store all data as single object
let state_data={"tactive":tactive,"tconfirmed":tconfirmed,"trecovered":trecovered,"tdeaths":tdeaths,
"diff_confirm":diff_confirm,"diff_recovered":diff_recovered,"diff_deaths":diff_deaths};
//getting state
let state_name =event.target.innerText.split("\n")[0];
let prevdate= event.target.innerText.split("\n")[1];
sessionStorage.setItem("state",state_name);
sessionStorage.setItem("prevdate",prevdate);
sessionStorage.setItem("state_data",JSON.stringify(state_data));
window.location="state.html"
}
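/*
 * Example: clicking "Kerala" stores sessionStorage keys "state" ("Kerala"),
 * "prevdate" (the date shown under the name) and "state_data" (the JSON blob
 * built above) before navigating to state.html, which is expected to read
 * them back with sessionStorage.getItem().
 */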
function searchState()
{
let search_val=$("#search_input").val().toLowerCase();
$(".search_result").css("display","none");
let is_has;
if(state_name_array.length)
{
is_has=false;
}
else
{
is_has=true;
}
$.each(state_name_array,function(index,state_name){
//split the class name if it has space
let class_name=state_name.split(" ")[0]
if(state_name.startsWith(search_val))
{
is_has=true;
$("."+class_name).addClass("show");
$("."+class_name).removeClass("hide");
}
else
{
$("."+class_name).addClass("hide");
}
});
if(!is_has)
{
$(".search_result").css("display","flex");
}
}
function search()
{
let search_button=document.querySelector("#search_button");
search_button.addEventListener("click",async function(){
let loading=document.querySelector(".loading__container");
loading.style["display"]="flex";
let date=$("#search_date").val();
        //only dates before API_CLOSED_DATE can be fetched; otherwise show the error
if(new Date(date)<new Date(API_CLOSED_DATE))
{
await getSpecificData(date);
}
else
{
loading.style["display"]="none";
handleError();
}
});
}
function | (date)
{
//setting no active to 1
no_active=1;
$(".state-container").empty();
document.querySelector(".state-container").style.display="flex";
document.querySelector(".error_container").style.display="none";
let link="https://api.covid19india.org/v3/data-"+date+".json";
$.getJSON(link,function(datas){
for(data in datas){
let data_array=[];
let prev_data_array=[];
data_array.push(date);
        //push 0 for active since date-specific data has no active count
data_array.push(0)
if(datas[data]["delta"])
{
if(datas[data]["delta"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["confirmed"])
}
if(datas[data]["delta"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["recovered"])
}
            //if no one died, push zero
if(datas[data]["delta"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["deceased"])
}
}
if(datas[data]["total"])
{
if(datas[data]["total"]["confirmed"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["confirmed"])
}
if(datas[data]["total"]["recovered"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["recovered"])
}
            //if no one died, push zero
if(datas[data]["total"]["deceased"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["deceased"])
}
}
        //store the corresponding state's name
data_array[5]=STATE_CODES[data]?STATE_CODES[data]:"state code is "+data;
addDataToTable(data_array,prev_data_array,0,1);
}
    //call searchState to apply any filter the user has already entered
let loading=document.querySelector(".loading__container");
loading.style["display"]="none";
searchState();
}).catch(handleError);
}
function handleError()
{
document.querySelector(".state-container").style.display="none";
document.querySelector(".error_container").style.display="flex";
document.querySelector(".loading__container").style.display="none";
}
search();
//added this at api closed on feb 6 2:47
if(new Date()<new Date(API_CLOSED_DATE))
{
getAllData();
}
else
{
handleError();
}
//calling only to get the status
getAllData(1); | getSpecificData | identifier_name |
script.js | //global var to store the state name
var state_name_array=[];
//set to 1 when the user searches by a specific date, because date-specific data has no active element
var no_active=0;
const STATE_CODES={
AN: "Andaman and Nicobar Islands",
AP: "Andhra Pradesh",
AR: "Arunachal Pradesh",
AS: "Assam",
BR: "Bihar",
CH: "Chandigarh",
CT: "Chhattisgarh",
DL: "Delhi",
DN: "Dadra and Nagar Haveli and Daman and Diu",
GA: "Goa",
GJ: "Gujarat",
HP: "Himachal Pradesh",
HR: "Haryana",
JH: "Jharkhand",
JK: "Jammu and Kashmir",
KA: "Karnataka",
KL: "Kerala",
LA: "Ladakh",
LD: "Lakshadweep",
MH: "Maharashtra",
ML: "Meghalaya",
MN: "Manipur",
MP: "Madhya Pradesh",
MZ: "Mizoram",
NL: "Nagaland",
OR: "Odisha",
PB: "Punjab",
PY: "Puducherry",
RJ: "Rajasthan",
SK: "Sikkim",
TG: "Telangana",
TN: "Tamil Nadu",
TR: "Tripura",
UN: "State Unassigned",
UP: "Uttar Pradesh",
UT: "Uttarakhand",
WB: "West Bengal",
};
const API_CLOSED_DATE="2021-02-01";
function getAllData(isstatus=0) {
/*
getting data from api which has "cases_time_series" "statewise" and "tested" details we only need statewise details
*/
var alldatalink="https://api.covid19india.org/data.json";
try
{
$.getJSON(alldatalink,function(data){
// storing prev day val for calculation of total value in india
let prev_index=data["cases_time_series"].length;
let prev_confirm=data["cases_time_series"][prev_index-1]["totalconfirmed"];
let prev_recoverd=data["cases_time_series"][prev_index-1]["totalrecovered"];
let prev_deaths=data["cases_time_series"][prev_index-1]["totaldeceased"];
//iterating only statewise object
$.each(data["statewise"],function(index,data){
//storing value for calculation
let date=data["lastupdatedtime"].split(" ")[0];
let active=data["active"];
let confirmed=data["confirmed"];
let recovered=data["recovered"];
let deaths=data["deaths"];
//adding the today status of covid
if(data["state"]==="Total")
{
//call function with isstatus =1
addDataToTable([date,active,confirmed,recovered,deaths],[prev_confirm,prev_recoverd,prev_deaths],1);
}
//pushing the state details to table
else if(!isstatus)
{
//to change date fromat
date=date.split("/").reverse().join("-");
//call function to get prev day data
getPrevDayData(date,data["statecode"],[date,active,confirmed,recovered,deaths,data["state"]])
}
})
}).fail(handleError);
}
catch(error)
{
handleError();
}
}
function getPrevDayData(tdate,statecode,data_array) {
let prevdate=tdate.split("-");
//creating date object and month start from zero so minus one
prevdate=new Date(prevdate[0],prevdate[1]-1,prevdate[2]);
//calculating prev day by sub one
prevdate.setDate(prevdate.getDate()-1);
// it return day/month/year, hr:min:sec AM
prevdate=prevdate.toLocaleString();
//spilting date and time
prevdate=prevdate.split(",")[0];
// convert day/month/year day month year
prevdate=prevdate.split("/");
//if day is single digit adding zero to it
if(prevdate[0].length==1)
{
prevdate[0]="0"+prevdate[0]
}
//if date is single digit adding zero to it
if(prevdate[1].length==1)
{
prevdate[1]="0"+prevdate[1]
}
//changing date format to year/month/date
prevdate=prevdate[2]+"-"+prevdate[0]+"-"+prevdate[1];
let link="https://api.covid19india.org/v3/data-"+prevdate+".json";
//store prev day data in array
let prev_data_array=[]
fetch(link).then((response) => {
return response.json()
}).then((data) =>{
if(data[statecode]!==undefined)
{
if(data[statecode]["total"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["confirmed"])
}
if(data[statecode]["total"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["recovered"])
}
//if none one died pushing zero
if(data[statecode]["total"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["deceased"])
}
            //call the function to add data to the table with isstatus = 0
addDataToTable(data_array,prev_data_array,0);
}
}).catch(handleError);
}
function addDataToTable(data_array,prev_data_array,isstatus,isold=0)
{ | let date=data_array[0];
let active=data_array[1];
let confirmed=data_array[2];
let recovered=data_array[3];
let deaths=data_array[4];
let prev_confirm=prev_data_array[0]
let prev_recoverd=prev_data_array[1]
let prev_deaths=prev_data_array[2]
if(!isold)
{
//calculating the difference
var diff_confirm=(confirmed-prev_confirm);
var diff_deaths=(deaths-prev_deaths);
var diff_recovered=(recovered-prev_recoverd);
}
else
{
var diff_confirm=prev_confirm;
var diff_deaths=prev_deaths;
var diff_recovered =prev_recoverd;
}
// img src
let up_img_src="image/upimg.png";
let down_img_src="image/downimg.png";
let upgreen_img_src="image/upgreenimg.png"
let confirm_pic,active_element,recovered_pic,deaths_pic;
let increases="<br><img src="+up_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
let decreases="<br><img src="+down_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
//checking if it add to satus or table
if(isstatus==1)
{
//checking for diff and adding corresponding image for it
if(diff_confirm>0)
{
$("#confirmed-no").append(confirmed+increases+diff_confirm+"</span>");
}
//check if is negative to avoid adding img for zero
else if (diff_confirm<=0)
{
$("#confirmed-no").append(confirmed+decreases+Math.abs(diff_confirm)+"</span>");
}
if(diff_deaths>0)
{
$("#deaths-no").append(deaths+increases+diff_deaths+"</span>");
}
else if(diff_deaths<=0)
{
$("#deaths-no").append(deaths+decreases+Math.abs(diff_deaths)+"</span>");
}
if(diff_recovered>0)
{
//setting uparrow img for recovered
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
$("#recovered-no").append(recovered+increases+diff_recovered+"</span>");
}
else if (diff_recovered<=0)
{
$("#recovered-no").append(recovered+decreases+Math.abs(diff_recovered)+"</span>");
}
$("#active-no").append(active);
$("#status-date").text(date);
}
//add data to table
else
{
if(diff_confirm>0)
{
confirm_pic=confirmed+increases+diff_confirm;
}
else if(diff_confirm<=0)
{
confirm_pic=confirmed+decreases+Math.abs(diff_confirm);
}
if(diff_deaths>0)
{
deaths_pic=deaths+increases+Math.abs(diff_deaths);
}
else if(diff_deaths<=0)
{
deaths_pic=deaths+decreases+Math.abs(diff_deaths);
}
if(diff_recovered>0)
{
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
recovered_pic=recovered+increases+diff_recovered;
}
else if(diff_recovered<=0)
{
recovered_pic=recovered+decreases+Math.abs(diff_recovered);
}
//checking if active persent or not
if(active)
{
active_element= "<div class='active-state'>Active<br><span id='active-no'>"+active+"</span></div>"
}
else{
active_element="<div></div>";
}
//retriving state value
let state_name=data_array[5]?data_array[5]:"unknown state";
state_name_array.push(state_name.toLowerCase())
let class_name=state_name.split(" ")[0].toLowerCase();
let state_html="<div class='state-div "+class_name+"' > \
<p class='state-name' onclick='goToState(event)'>"+state_name+"<br>"+date+"</p> \
<div class='state-data d-flex flex-wrap justify-content-between' > \
"+active_element+
"<div class='confirmed-state'>Confirmed<br><span id='active-no'>"+confirm_pic+"</span></div>\
<div class='recovered-state'>Recovered<br><span id='active-no'>"+recovered_pic+"</span></div>\
<div class='deaths-state'>Deaths<br><span id='active-no'>"+deaths_pic+"</span></div><br>\
</div>"
$(".state-container").append(state_html);
}
}
function goToState(event)
{
//it return total text
let array=event.target.nextSibling.nextSibling.innerText.split("\n");
debugger;
let tactive=tconfirmed=trecovered=tdeaths=diff_confirm=diff_recovered=diff_deaths=0;
if(no_active)
{
tactive="unknown";
index=2
}
else{
if(array[1])
{
tactive=array[1];
}
index=0;
}
if(array[3-index])
{
tconfirmed=array[3-index];
}
if(array[4-index])
{
diff_confirm=array[4-index];
}
if(array[6-index])
{
trecovered=array[6-index];
}
if(array[7-index])
{
diff_recovered=array[7-index];
}
if(array[9-index])
{
tdeaths=array[9-index];
}
if(array[10-index])
{
diff_deaths=array[10-index];
}
//store all data as single object
let state_data={"tactive":tactive,"tconfirmed":tconfirmed,"trecovered":trecovered,"tdeaths":tdeaths,
"diff_confirm":diff_confirm,"diff_recovered":diff_recovered,"diff_deaths":diff_deaths};
//getting state
let state_name =event.target.innerText.split("\n")[0];
let prevdate= event.target.innerText.split("\n")[1];
sessionStorage.setItem("state",state_name);
sessionStorage.setItem("prevdate",prevdate);
sessionStorage.setItem("state_data",JSON.stringify(state_data));
window.location="state.html"
}
function searchState()
{
let search_val=$("#search_input").val().toLowerCase();
$(".search_result").css("display","none");
let is_has;
if(state_name_array.length)
{
is_has=false;
}
else
{
is_has=true;
}
$.each(state_name_array,function(index,state_name){
//split the class name if it has space
let class_name=state_name.split(" ")[0]
if(state_name.startsWith(search_val))
{
is_has=true;
$("."+class_name).addClass("show");
$("."+class_name).removeClass("hide");
}
else
{
$("."+class_name).addClass("hide");
}
});
if(!is_has)
{
$(".search_result").css("display","flex");
}
}
function search()
{
let search_button=document.querySelector("#search_button");
search_button.addEventListener("click",async function(){
let loading=document.querySelector(".loading__container");
loading.style["display"]="flex";
let date=$("#search_date").val();
//if user requested date greater than API CLOSED DATE handle error
if(new Date(date)<new Date(API_CLOSED_DATE))
{
await getSpecificData(date);
}
else
{
loading.style["display"]="none";
handleError();
}
});
}
function getSpecificData(date)
{
//setting no active to 1
no_active=1;
$(".state-container").empty();
document.querySelector(".state-container").style.display="flex";
document.querySelector(".error_container").style.display="none";
let link="https://api.covid19india.org/v3/data-"+date+".json";
$.getJSON(link,function(datas){
for(data in datas){
let data_array=[];
let prev_data_array=[];
data_array.push(date);
//pushing active has no active
data_array.push(0)
if(datas[data]["delta"])
{
if(datas[data]["delta"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["confirmed"])
}
if(datas[data]["delta"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["recovered"])
}
//if none one died pushing zero
if(datas[data]["delta"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["deceased"])
}
}
if(datas[data]["total"])
{
if(datas[data]["total"]["confirmed"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["confirmed"])
}
if(datas[data]["total"]["recovered"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["recovered"])
}
//if none one died pushing zero
if(datas[data]["total"]["deceased"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["deceased"])
}
}
        //store the corresponding state's name
data_array[5]=STATE_CODES[data]?STATE_CODES[data]:"state code is "+data;
addDataToTable(data_array,prev_data_array,0,1);
}
//calling the search state function to filter if user entered
let loading=document.querySelector(".loading__container");
loading.style["display"]="none";
searchState();
}).catch(handleError);
}
function handleError()
{
document.querySelector(".state-container").style.display="none";
document.querySelector(".error_container").style.display="flex";
document.querySelector(".loading__container").style.display="none";
}
search();
//added this at api closed on feb 6 2:47
if(new Date()<new Date(API_CLOSED_DATE))
{
getAllData();
}
else
{
handleError();
}
//calling only to get the status
getAllData(1); | //retrive the valu from array | random_line_split |
script.js | //global var to store the state name
var state_name_array=[];
//set to 1 when the user searches by a specific date, because date-specific data has no active element
var no_active=0;
const STATE_CODES={
AN: "Andaman and Nicobar Islands",
AP: "Andhra Pradesh",
AR: "Arunachal Pradesh",
AS: "Assam",
BR: "Bihar",
CH: "Chandigarh",
CT: "Chhattisgarh",
DL: "Delhi",
DN: "Dadra and Nagar Haveli and Daman and Diu",
GA: "Goa",
GJ: "Gujarat",
HP: "Himachal Pradesh",
HR: "Haryana",
JH: "Jharkhand",
JK: "Jammu and Kashmir",
KA: "Karnataka",
KL: "Kerala",
LA: "Ladakh",
LD: "Lakshadweep",
MH: "Maharashtra",
ML: "Meghalaya",
MN: "Manipur",
MP: "Madhya Pradesh",
MZ: "Mizoram",
NL: "Nagaland",
OR: "Odisha",
PB: "Punjab",
PY: "Puducherry",
RJ: "Rajasthan",
SK: "Sikkim",
TG: "Telangana",
TN: "Tamil Nadu",
TR: "Tripura",
UN: "State Unassigned",
UP: "Uttar Pradesh",
UT: "Uttarakhand",
WB: "West Bengal",
};
const API_CLOSED_DATE="2021-02-01";
function getAllData(isstatus=0) {
/*
getting data from api which has "cases_time_series" "statewise" and "tested" details we only need statewise details
*/
var alldatalink="https://api.covid19india.org/data.json";
try
{
$.getJSON(alldatalink,function(data){
// storing prev day val for calculation of total value in india
let prev_index=data["cases_time_series"].length;
let prev_confirm=data["cases_time_series"][prev_index-1]["totalconfirmed"];
let prev_recoverd=data["cases_time_series"][prev_index-1]["totalrecovered"];
let prev_deaths=data["cases_time_series"][prev_index-1]["totaldeceased"];
//iterating only statewise object
$.each(data["statewise"],function(index,data){
//storing value for calculation
let date=data["lastupdatedtime"].split(" ")[0];
let active=data["active"];
let confirmed=data["confirmed"];
let recovered=data["recovered"];
let deaths=data["deaths"];
//adding the today status of covid
if(data["state"]==="Total")
{
//call function with isstatus =1
addDataToTable([date,active,confirmed,recovered,deaths],[prev_confirm,prev_recoverd,prev_deaths],1);
}
//pushing the state details to table
else if(!isstatus)
{
//to change date fromat
date=date.split("/").reverse().join("-");
//call function to get prev day data
getPrevDayData(date,data["statecode"],[date,active,confirmed,recovered,deaths,data["state"]])
}
})
}).fail(handleError);
}
catch(error)
{
handleError();
}
}
function getPrevDayData(tdate,statecode,data_array) {
let prevdate=tdate.split("-");
//creating date object and month start from zero so minus one
prevdate=new Date(prevdate[0],prevdate[1]-1,prevdate[2]);
//calculating prev day by sub one
prevdate.setDate(prevdate.getDate()-1);
// it return day/month/year, hr:min:sec AM
prevdate=prevdate.toLocaleString();
//spilting date and time
prevdate=prevdate.split(",")[0];
// convert day/month/year day month year
prevdate=prevdate.split("/");
//if day is single digit adding zero to it
if(prevdate[0].length==1)
{
prevdate[0]="0"+prevdate[0]
}
//if date is single digit adding zero to it
if(prevdate[1].length==1)
{
prevdate[1]="0"+prevdate[1]
}
//changing date format to year/month/date
prevdate=prevdate[2]+"-"+prevdate[0]+"-"+prevdate[1];
let link="https://api.covid19india.org/v3/data-"+prevdate+".json";
//store prev day data in array
let prev_data_array=[]
fetch(link).then((response) => {
return response.json()
}).then((data) =>{
if(data[statecode]!==undefined)
{
if(data[statecode]["total"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["confirmed"])
}
if(data[statecode]["total"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["recovered"])
}
//if none one died pushing zero
if(data[statecode]["total"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["deceased"])
}
            //call the function to add data to the table with isstatus = 0
addDataToTable(data_array,prev_data_array,0);
}
}).catch(handleError);
}
function addDataToTable(data_array,prev_data_array,isstatus,isold=0)
{
    //retrieve the values from the array
let date=data_array[0];
let active=data_array[1];
let confirmed=data_array[2];
let recovered=data_array[3];
let deaths=data_array[4];
let prev_confirm=prev_data_array[0]
let prev_recoverd=prev_data_array[1]
let prev_deaths=prev_data_array[2]
if(!isold)
{
//calculating the difference
var diff_confirm=(confirmed-prev_confirm);
var diff_deaths=(deaths-prev_deaths);
var diff_recovered=(recovered-prev_recoverd);
}
else
{
var diff_confirm=prev_confirm;
var diff_deaths=prev_deaths;
var diff_recovered =prev_recoverd;
}
// img src
let up_img_src="image/upimg.png";
let down_img_src="image/downimg.png";
let upgreen_img_src="image/upgreenimg.png"
let confirm_pic,active_element,recovered_pic,deaths_pic;
let increases="<br><img src="+up_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
let decreases="<br><img src="+down_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
//checking if it add to satus or table
if(isstatus==1)
{
//checking for diff and adding corresponding image for it
if(diff_confirm>0)
{
$("#confirmed-no").append(confirmed+increases+diff_confirm+"</span>");
}
//check if is negative to avoid adding img for zero
else if (diff_confirm<=0)
{
$("#confirmed-no").append(confirmed+decreases+Math.abs(diff_confirm)+"</span>");
}
if(diff_deaths>0)
{
$("#deaths-no").append(deaths+increases+diff_deaths+"</span>");
}
else if(diff_deaths<=0)
{
$("#deaths-no").append(deaths+decreases+Math.abs(diff_deaths)+"</span>");
}
if(diff_recovered>0)
{
//setting uparrow img for recovered
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
$("#recovered-no").append(recovered+increases+diff_recovered+"</span>");
}
else if (diff_recovered<=0)
{
$("#recovered-no").append(recovered+decreases+Math.abs(diff_recovered)+"</span>");
}
$("#active-no").append(active);
$("#status-date").text(date);
}
//add data to table
else
{
if(diff_confirm>0)
{
confirm_pic=confirmed+increases+diff_confirm;
}
else if(diff_confirm<=0)
{
confirm_pic=confirmed+decreases+Math.abs(diff_confirm);
}
if(diff_deaths>0)
{
deaths_pic=deaths+increases+Math.abs(diff_deaths);
}
else if(diff_deaths<=0)
{
deaths_pic=deaths+decreases+Math.abs(diff_deaths);
}
if(diff_recovered>0)
{
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
recovered_pic=recovered+increases+diff_recovered;
}
else if(diff_recovered<=0)
{
recovered_pic=recovered+decreases+Math.abs(diff_recovered);
}
//checking if active persent or not
if(active)
{
active_element= "<div class='active-state'>Active<br><span id='active-no'>"+active+"</span></div>"
}
else{
active_element="<div></div>";
}
//retriving state value
let state_name=data_array[5]?data_array[5]:"unknown state";
state_name_array.push(state_name.toLowerCase())
let class_name=state_name.split(" ")[0].toLowerCase();
let state_html="<div class='state-div "+class_name+"' > \
<p class='state-name' onclick='goToState(event)'>"+state_name+"<br>"+date+"</p> \
<div class='state-data d-flex flex-wrap justify-content-between' > \
"+active_element+
"<div class='confirmed-state'>Confirmed<br><span id='active-no'>"+confirm_pic+"</span></div>\
<div class='recovered-state'>Recovered<br><span id='active-no'>"+recovered_pic+"</span></div>\
<div class='deaths-state'>Deaths<br><span id='active-no'>"+deaths_pic+"</span></div><br>\
</div>"
$(".state-container").append(state_html);
}
}
function goToState(event)
{
//it return total text
let array=event.target.nextSibling.nextSibling.innerText.split("\n");
debugger;
let tactive=tconfirmed=trecovered=tdeaths=diff_confirm=diff_recovered=diff_deaths=0;
if(no_active)
{
tactive="unknown";
index=2
}
else{
if(array[1])
{
tactive=array[1];
}
index=0;
}
if(array[3-index])
{
tconfirmed=array[3-index];
}
if(array[4-index])
{
diff_confirm=array[4-index];
}
if(array[6-index])
{
trecovered=array[6-index];
}
if(array[7-index])
{
diff_recovered=array[7-index];
}
if(array[9-index])
{
tdeaths=array[9-index];
}
if(array[10-index])
{
diff_deaths=array[10-index];
}
//store all data as single object
let state_data={"tactive":tactive,"tconfirmed":tconfirmed,"trecovered":trecovered,"tdeaths":tdeaths,
"diff_confirm":diff_confirm,"diff_recovered":diff_recovered,"diff_deaths":diff_deaths};
//getting state
let state_name =event.target.innerText.split("\n")[0];
let prevdate= event.target.innerText.split("\n")[1];
sessionStorage.setItem("state",state_name);
sessionStorage.setItem("prevdate",prevdate);
sessionStorage.setItem("state_data",JSON.stringify(state_data));
window.location="state.html"
}
function searchState()
{
let search_val=$("#search_input").val().toLowerCase();
$(".search_result").css("display","none");
let is_has;
if(state_name_array.length)
{
is_has=false;
}
else
{
is_has=true;
}
$.each(state_name_array,function(index,state_name){
//split the class name if it has space
let class_name=state_name.split(" ")[0]
if(state_name.startsWith(search_val))
{
is_has=true;
$("."+class_name).addClass("show");
$("."+class_name).removeClass("hide");
}
else
{
$("."+class_name).addClass("hide");
}
});
if(!is_has)
{
$(".search_result").css("display","flex");
}
}
function search()
{
let search_button=document.querySelector("#search_button");
search_button.addEventListener("click",async function(){
let loading=document.querySelector(".loading__container");
loading.style["display"]="flex";
let date=$("#search_date").val();
//if user requested date greater than API CLOSED DATE handle error
if(new Date(date)<new Date(API_CLOSED_DATE))
{
await getSpecificData(date);
}
else
{
loading.style["display"]="none";
handleError();
}
});
}
function getSpecificData(date)
|
function handleError()
{
document.querySelector(".state-container").style.display="none";
document.querySelector(".error_container").style.display="flex";
document.querySelector(".loading__container").style.display="none";
}
search();
//added this at api closed on feb 6 2:47
if(new Date()<new Date(API_CLOSED_DATE))
{
getAllData();
}
else
{
handleError();
}
//calling only to get the status
getAllData(1); | {
//setting no active to 1
no_active=1;
$(".state-container").empty();
document.querySelector(".state-container").style.display="flex";
document.querySelector(".error_container").style.display="none";
let link="https://api.covid19india.org/v3/data-"+date+".json";
$.getJSON(link,function(datas){
for(data in datas){
let data_array=[];
let prev_data_array=[];
data_array.push(date);
//pushing active has no active
data_array.push(0)
if(datas[data]["delta"])
{
if(datas[data]["delta"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["confirmed"])
}
if(datas[data]["delta"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["recovered"])
}
//if none one died pushing zero
if(datas[data]["delta"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(datas[data]["delta"]["deceased"])
}
}
if(datas[data]["total"])
{
if(datas[data]["total"]["confirmed"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["confirmed"])
}
if(datas[data]["total"]["recovered"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["recovered"])
}
//if none one died pushing zero
if(datas[data]["total"]["deceased"]===undefined)
{
data_array.push(0)
}
else
{
data_array.push(datas[data]["total"]["deceased"])
}
}
        //store the corresponding state's name
data_array[5]=STATE_CODES[data]?STATE_CODES[data]:"state code is "+data;
addDataToTable(data_array,prev_data_array,0,1);
}
//calling the search state function to filter if user entered
let loading=document.querySelector(".loading__container");
loading.style["display"]="none";
searchState();
}).catch(handleError);
} | identifier_body |
collect-raman.py | #!/usr/bin/env python
# * read dark with a certain int time
# * turn laser on at a certain power level, does not need to be calibrated
# * read signal with the same int time
# * turn laser off
#
# The script then repeats this over and over.
import sys
import re
from time import sleep
from datetime import datetime
import matplotlib.pyplot as plt
import traceback
import usb.core
import argparse
import struct
import sys
HOST_TO_DEVICE = 0x40
DEVICE_TO_HOST = 0xC0
TIMEOUT_MS = 1000
MAX_PAGES = 8
PAGE_SIZE = 64
# An extensible, stateful "Test Fixture"
class Fixture(object):
############################################################################
# Lifecycle
############################################################################
def __init__(self):
self.outfile = None
self.device = None
self.dark = None
# parse cmd-line args
parser = argparse.ArgumentParser()
parser.add_argument("--bin2x2", action="store_true", help="apply 2x2 binning")
parser.add_argument("--count", type=int, help="read the given number of spectra (default 1)", default=1)
parser.add_argument("--dark", action="store_true", help="collect dark and perform dark correction")
parser.add_argument("--debug", action="store_true", help="debug output (default off)")
parser.add_argument("--delay-ms", type=int, help="delay n ms between spectra (default 10)", default=10)
parser.add_argument("--fire-laser", action="store_true", help="to avoid accidents, WILL NOT fire laser unless specified")
parser.add_argument("--gain-db", type=float, help="gain in dB (default 8.0)", default=8.0)
parser.add_argument("--integration-time-ms", type=int, help="integration time (ms) (default 100)", default=100)
parser.add_argument("--laser-power-perc", type=float, help="laser power as a percentage (range 0.1-100) (default 100)")
parser.add_argument("--laser-warmup-ms", type=int, help="laser warmup delay in ms (default 1000)", default=1000)
parser.add_argument("--outfile", type=str, help="outfile to save full spectra")
parser.add_argument("--plot", action="store_true", help="graph spectra after collection")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 0)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
print("No spectrometers found")
return
self.debug(device)
self.device = device
# claim device (I'm never sure when this is required)
if False:
self.debug("claiming spectrometer")
self.device.set_configuration(1)
usb.util.claim_interface(self.device, 0)
self.debug("claimed device")
# read configuration
self.fw_version = self.get_firmware_version()
self.fpga_version = self.get_fpga_version()
self.read_eeprom()
self.generate_wavelengths()
print(f"Connected to {self.model} {self.serial_number} with {self.pixels} pixels ({self.wavelengths[0]:.2f}, {self.wavelengths[-1]:.2f}nm) ({self.wavenumbers[0]:.2f}, {self.wavenumbers[-1]:.2f}cm-1)")
print(f"ARM {self.fw_version}, FPGA {self.fpga_version}")
def read_eeprom(self):
self.buffers = [self.get_cmd(0xff, 0x01, page) for page in range(8)]
# parse key fields (extend as needed)
self.format = self.unpack((0, 63, 1), "B")
self.model = self.unpack((0, 0, 16), "s")
self.serial_number = self.unpack((0, 16, 16), "s")
self.pixels = self.unpack((2, 16, 2), "H")
self.excitation_nm = self.unpack((3, 36, 4), "f")
self.wavecal_C0 = self.unpack((1, 0, 4), "f")
self.wavecal_C1 = self.unpack((1, 4, 4), "f")
self.wavecal_C2 = self.unpack((1, 8, 4), "f")
self.wavecal_C3 = self.unpack((1, 12, 4), "f")
# unsure if SiG units receive a laser power calibration, but capture it for when they do
self.laser_power_C0 = self.unpack((3, 12, 4), "f")
self.laser_power_C1 = self.unpack((3, 16, 4), "f")
self.laser_power_C2 = self.unpack((3, 20, 4), "f")
self.laser_power_C3 = self.unpack((3, 24, 4), "f")
self.max_laser_power_mW = self.unpack((3, 28, 4), "f")
self.min_laser_power_mW = self.unpack((3, 32, 4), "f")
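# Each field above is read with unpack((page, start_byte, length), format);
# for example (2, 16, 2) with "H" decodes pixels as a uint16 (native byte
# order) from bytes 16-17 of EEPROM page 2, and (3, 36, 4) with "f" decodes
# the excitation wavelength as a 32-bit float from page 3.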
def generate_wavelengths(self):
self.wavelengths = []
self.wavenumbers = []
for i in range(self.pixels):
wavelength = self.wavecal_C0 \
+ self.wavecal_C1 * i \
+ self.wavecal_C2 * i * i \
+ self.wavecal_C3 * i * i * i
wavenumber = 1e7 / self.excitation_nm - 1e7 / wavelength
self.wavelengths.append(wavelength)
self.wavenumbers.append(wavenumber)
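# Example: with, say, a 785 nm excitation laser and a pixel whose wavelength
# evaluates to 912.15 nm, the Raman shift is
# 1e7/785 - 1e7/912.15 = 12738.85 - 10963.11, or roughly 1775.7 cm-1.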
############################################################################
# Commands
############################################################################
def run(self):
# disable laser
self.set_laser_enable(False)
# set integration time
self.set_integration_time_ms(self.args.integration_time_ms)
# set gain dB
self.set_gain_db(self.args.gain_db)
# perform one throwaway (seems to help SiG)
self.get_spectrum()
# take dark
if self.args.dark:
print("taking dark")
self.dark = self.get_averaged_spectrum()
# open outfile
if self.args.outfile is not None:
self.outfile = open(self.args.outfile, 'w')
# header rows
self.outfile.write("pixel, %s\n" % (", ".join([str(x) for x in range(self.pixels)])))
self.outfile.write("wavelength, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavelengths])))
self.outfile.write("wavenumber, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavenumbers])))
# enable laser
if self.args.fire_laser:
if self.args.laser_power_perc is not None:
self.set_laser_power_perc(self.args.laser_power_perc)
self.set_laser_enable(True)
else:
print("*** not firing laser because --fire-laser not specified ***")
# take measurements
spectra = []
try:
for i in range(self.args.count):
# take dark-corrected measurement
spectrum = self.get_averaged_spectrum()
if self.dark is not None:
spectrum = [s - d for s, d in zip(spectrum, self.dark)]
spectra.append(spectrum)
# save measurement
now = datetime.now()
print("%s Spectrum %3d/%3d %s ..." % (now, i+1, self.args.count, spectrum[:10]))
if self.outfile is not None:
self.outfile.write("%s, %s\n" % (now, ", ".join([f"{x:.2f}" for x in spectrum])))
# delay before next
sleep(self.args.delay_ms / 1000.0 )
except:
print("caught exception reading spectra")
traceback.print_exc()
# disable laser
self.set_laser_enable(False)
# close file
if self.outfile is not None:
self.outfile.close()
# graph
if self.args.plot:
for a in spectra:
plt.plot(a)
plt.title(f"integration time {self.args.integration_time_ms}ms, gain {self.args.gain_db}dB, count {self.args.count}")
plt.show()
############################################################################
# opcodes
############################################################################
def get_firmware_version(self):
result = self.get_cmd(0xc0)
if result is not None and len(result) >= 4:
return "%d.%d.%d.%d" % (result[3], result[2], result[1], result[0])
def get_fpga_version(self):
s = ""
result = self.get_cmd(0xb4)
if result is not None:
for i in range(len(result)):
c = result[i]
if 0x20 <= c < 0x7f:
s += chr(c)
return s
def set_laser_enable(self, flag):
print(f"setting laserEnable {flag}")
self.send_cmd(0xbe, 1 if flag else 0)
if flag and self.args.laser_warmup_ms > 0:
print(f"{datetime.now()} starting laser warmup")
sleep(self.args.laser_warmup_ms / 1000.0)
print(f"{datetime.now()} finished laser warmup")
def set_integration_time_ms(self, ms):
if ms < 1 or ms > 0xffff:
print("ERROR: integrationTimeMS requires positive uint16")
return
self.debug(f"setting integrationTimeMS to {ms}")
self.send_cmd(0xb2, ms)
def | (self, db):
db = round(db, 1)
msb = int(db)
lsb = int((db - int(db)) * 10)
raw = (msb << 8) | lsb
self.debug("setting gainDB 0x%04x (FunkyFloat)" % raw)
self.send_cmd(0xb7, raw)
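# Example: 8.0 dB gives msb=8, lsb=0, raw=0x0800; 7.5 dB gives msb=7, lsb=5,
# raw=0x0705 - the integer dB lands in the high byte and the tenths in the
# low byte of the "FunkyFloat".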
def set_modulation_enable(self, flag):
self.debug(f"setting laserModulationEnable {flag}")
self.send_cmd(0xbd, 1 if flag else 0)
def set_raman_mode(self, flag):
self.debug(f"setting ramanMode {flag}")
self.send_cmd(0xff, 0x16, 1 if flag else 0)
def set_raman_delay_ms(self, ms):
if ms < 0 or ms > 0xffff:
print("ERROR: ramanDelay requires uint16")
return
self.debug(f"setting ramanDelay {ms} ms")
self.send_cmd(0xff, 0x20, ms)
def set_watchdog_sec(self, sec):
if sec < 0 or sec > 0xffff:
print("ERROR: laserWatchdog requires uint16")
return
self.debug(f"setting laserWatchdog {sec} sec")
self.send_cmd(0xff, 0x18, sec)
def get_averaged_spectrum(self):
spectrum = self.get_spectrum()
if spectrum is None or self.args.scans_to_average < 2:
return spectrum
for i in range(self.args.scans_to_average - 1):
tmp = self.get_spectrum()
if tmp is None:
return
for j in range(len(spectrum)):
spectrum[j] += tmp[j]
for i in range(len(spectrum)):
spectrum[i] = spectrum[i] / self.args.scans_to_average
return spectrum
def get_spectrum(self):
timeout_ms = TIMEOUT_MS + self.args.integration_time_ms * 2
self.send_cmd(0xad, 0)
data = self.device.read(0x82, self.pixels * 2, timeout=timeout_ms)
if data is None:
return
spectrum = []
for i in range(0, len(data), 2):
spectrum.append(data[i] | (data[i+1] << 8))
if len(spectrum) != self.pixels:
return
# stomp blank SiG pixels (first 3 and last)
for i in range(3):
spectrum[i] = spectrum[3]
spectrum[-1] = spectrum[-2]
# 2x2 binning
if self.args.bin2x2:
for i in range(self.pixels-1):
spectrum[i] = (spectrum[i] + spectrum[i+1]) / 2.0
return spectrum
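# Example (ignoring the blank-pixel stomp above): with --bin2x2 a spectrum of
# [10, 12, 14, 16] becomes [11.0, 13.0, 15.0, 16] - each pixel is averaged
# with its right-hand neighbour and the last pixel is left as-is.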
## perc is a float (0.0, 100.0)
def set_laser_power_perc(self, perc):
value = float(max(0, min(100, perc)))
self.set_mod_enable(False)
if value >= 100:
return
if value < 0.1:
self.set_laser_enable(False)
return
period_us = 1000
width_us = int(round(1.0 * value * period_us / 100.0, 0)) # note value is in range (0, 100) not (0, 1)
width_us = max(1, min(width_us, period_us))
self.set_mod_period_us(period_us)
self.set_mod_width_us(width_us)
self.set_mod_enable(True)
def set_mod_enable(self, flag):
return self.send_cmd(0xbd, 1 if flag else 0)
def set_mod_period_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xc7, lsw, msw, buf)
def set_mod_width_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xdb, lsw, msw, buf)
############################################################################
# Utility Methods
############################################################################
def to40bit(self, us):
lsw = us & 0xffff
msw = (us >> 16) & 0xffff
buf = [(us >> 32) & 0xff] + [0] * 7  # high byte plus padding to an 8-byte payload
return (lsw, msw, buf)
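# Example: us = 0x123456789 (roughly 81 minutes in microseconds) splits into
# lsw = 0x6789, msw = 0x2345 and buf[0] = 0x01; send_cmd then passes lsw as
# wValue, msw as wIndex and buf as the payload of the control transfer.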
def debug(self, msg):
if self.args.debug:
print(f"DEBUG: {msg}")
def send_cmd(self, cmd, value=0, index=0, buf=None):
if buf is None:
buf = [0] * 8
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x) >> %s" % (HOST_TO_DEVICE, cmd, value, index, buf))
self.device.ctrl_transfer(HOST_TO_DEVICE, cmd, value, index, buf, TIMEOUT_MS)
def get_cmd(self, cmd, value=0, index=0, length=64, lsb_len=None, msb_len=None, label=None):
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d)" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS))
result = self.device.ctrl_transfer(DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS)
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d) << %s" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS, result))
value = 0
if msb_len is not None:
for i in range(msb_len):
value = value << 8 | result[i]
return value
elif lsb_len is not None:
for i in range(lsb_len):
value = (result[i] << (8 * i)) | value
return value
else:
return result
def unpack(self, address, data_type):
page = address[0]
start_byte = address[1]
length = address[2]
end_byte = start_byte + length
buf = self.buffers[page]
if buf is None or end_byte > len(buf):
raise("error unpacking EEPROM page %d, offset %d, len %d as %s: buf is %s (label %s)" %
(page, start_byte, length, data_type, buf, label))
if data_type == "s":
result = ""
for c in buf[start_byte:end_byte]:
if c == 0:
break
result += chr(c)
else:
result = struct.unpack(data_type, buf[start_byte:end_byte])[0]
return result
fixture = Fixture()
if fixture.device is not None:
fixture.run()
| set_gain_db | identifier_name |
collect-raman.py | #!/usr/bin/env python
# * read dark with a certain int time
# * turn laser on at a certain power level, does not need to be calibrated
# * read signal with the same int time
# * turn laser off
#
# The script then repeats this over and over.
import sys
import re
from time import sleep
from datetime import datetime
import matplotlib.pyplot as plt
import traceback
import usb.core
import argparse
import struct
import sys
HOST_TO_DEVICE = 0x40
DEVICE_TO_HOST = 0xC0
TIMEOUT_MS = 1000
MAX_PAGES = 8
PAGE_SIZE = 64
# An extensible, stateful "Test Fixture"
class Fixture(object):
############################################################################
# Lifecycle
############################################################################
def __init__(self):
self.outfile = None
self.device = None
self.dark = None
# parse cmd-line args
parser = argparse.ArgumentParser()
parser.add_argument("--bin2x2", action="store_true", help="apply 2x2 binning")
parser.add_argument("--count", type=int, help="read the given number of spectra (default 1)", default=1)
parser.add_argument("--dark", action="store_true", help="collect dark and perform dark correction")
parser.add_argument("--debug", action="store_true", help="debug output (default off)")
parser.add_argument("--delay-ms", type=int, help="delay n ms between spectra (default 10)", default=10)
parser.add_argument("--fire-laser", action="store_true", help="to avoid accidents, WILL NOT fire laser unless specified")
parser.add_argument("--gain-db", type=float, help="gain in dB (default 8.0)", default=8.0)
parser.add_argument("--integration-time-ms", type=int, help="integration time (ms) (default 100)", default=100)
parser.add_argument("--laser-power-perc", type=float, help="laser power as a percentage (range 0.1-100) (default 100)")
parser.add_argument("--laser-warmup-ms", type=int, help="laser warmup delay in ms (default 1000)", default=1000)
parser.add_argument("--outfile", type=str, help="outfile to save full spectra")
parser.add_argument("--plot", action="store_true", help="graph spectra after collection")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 0)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
print("No spectrometers found")
return
self.debug(device)
self.device = device
# claim device (I'm never sure when this is required)
if False:
self.debug("claiming spectrometer")
self.device.set_configuration(1)
usb.util.claim_interface(self.device, 0)
self.debug("claimed device")
# read configuration
self.fw_version = self.get_firmware_version()
self.fpga_version = self.get_fpga_version()
self.read_eeprom()
self.generate_wavelengths()
print(f"Connected to {self.model} {self.serial_number} with {self.pixels} pixels ({self.wavelengths[0]:.2f}, {self.wavelengths[-1]:.2f}nm) ({self.wavenumbers[0]:.2f}, {self.wavenumbers[-1]:.2f}cm-1)")
print(f"ARM {self.fw_version}, FPGA {self.fpga_version}")
def read_eeprom(self):
self.buffers = [self.get_cmd(0xff, 0x01, page) for page in range(8)]
# parse key fields (extend as needed)
self.format = self.unpack((0, 63, 1), "B")
self.model = self.unpack((0, 0, 16), "s")
self.serial_number = self.unpack((0, 16, 16), "s")
self.pixels = self.unpack((2, 16, 2), "H")
self.excitation_nm = self.unpack((3, 36, 4), "f")
self.wavecal_C0 = self.unpack((1, 0, 4), "f")
self.wavecal_C1 = self.unpack((1, 4, 4), "f")
self.wavecal_C2 = self.unpack((1, 8, 4), "f")
self.wavecal_C3 = self.unpack((1, 12, 4), "f")
# unsure if SiG units receive a laser power calibration, but capture it for when they do
self.laser_power_C0 = self.unpack((3, 12, 4), "f")
self.laser_power_C1 = self.unpack((3, 16, 4), "f")
self.laser_power_C2 = self.unpack((3, 20, 4), "f")
self.laser_power_C3 = self.unpack((3, 24, 4), "f")
self.max_laser_power_mW = self.unpack((3, 28, 4), "f")
self.min_laser_power_mW = self.unpack((3, 32, 4), "f")
def generate_wavelengths(self):
self.wavelengths = []
self.wavenumbers = []
for i in range(self.pixels):
wavelength = self.wavecal_C0 \
+ self.wavecal_C1 * i \
+ self.wavecal_C2 * i * i \
+ self.wavecal_C3 * i * i * i
wavenumber = 1e7 / self.excitation_nm - 1e7 / wavelength
self.wavelengths.append(wavelength)
self.wavenumbers.append(wavenumber)
############################################################################
# Commands
############################################################################
def run(self):
# disable laser
self.set_laser_enable(False)
# set integration time
self.set_integration_time_ms(self.args.integration_time_ms)
# set gain dB
self.set_gain_db(self.args.gain_db)
# perform one throwaway (seems to help SiG)
self.get_spectrum()
# take dark
if self.args.dark:
print("taking dark")
self.dark = self.get_averaged_spectrum()
# open outfile
if self.args.outfile is not None:
self.outfile = open(self.args.outfile, 'w')
# header rows
self.outfile.write("pixel, %s\n" % (", ".join([str(x) for x in range(self.pixels)])))
self.outfile.write("wavelength, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavelengths])))
self.outfile.write("wavenumber, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavenumbers])))
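# The outfile is a simple CSV: three header rows spanning the pixel columns
# (pixel index, wavelength in nm, wavenumber in cm-1), then one row per
# acquisition below holding the timestamp and the (optionally dark-corrected)
# intensities.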
# enable laser
if self.args.fire_laser:
if self.args.laser_power_perc is not None:
self.set_laser_power_perc(self.args.laser_power_perc)
self.set_laser_enable(True)
else:
|
# take measurements
spectra = []
try:
for i in range(self.args.count):
# take dark-corrected measurement
spectrum = self.get_averaged_spectrum()
if self.dark is not None:
spectrum = [s - d for s, d in zip(spectrum, self.dark)]  # element-wise dark subtraction
spectra.append(spectrum)
# save measurement
now = datetime.now()
print("%s Spectrum %3d/%3d %s ..." % (now, i+1, self.args.count, spectrum[:10]))
if self.outfile is not None:
self.outfile.write("%s, %s\n" % (now, ", ".join([f"{x:.2f}" for x in spectrum])))
# delay before next
sleep(self.args.delay_ms / 1000.0 )
except:
print("caught exception reading spectra")
traceback.print_exc()
# disable laser
self.set_laser_enable(False)
# close file
if self.outfile is not None:
self.outfile.close()
# graph
if self.args.plot:
for a in spectra:
plt.plot(a)
plt.title(f"integration time {self.args.integration_time_ms}ms, gain {self.args.gain_db}dB, count {self.args.count}")
plt.show()
############################################################################
# opcodes
############################################################################
def get_firmware_version(self):
result = self.get_cmd(0xc0)
if result is not None and len(result) >= 4:
return "%d.%d.%d.%d" % (result[3], result[2], result[1], result[0])
def get_fpga_version(self):
s = ""
result = self.get_cmd(0xb4)
if result is not None:
for i in range(len(result)):
c = result[i]
if 0x20 <= c < 0x7f:
s += chr(c)
return s
def set_laser_enable(self, flag):
print(f"setting laserEnable {flag}")
self.send_cmd(0xbe, 1 if flag else 0)
if flag and self.args.laser_warmup_ms > 0:
print(f"{datetime.now()} starting laser warmup")
sleep(self.args.laser_warmup_ms / 1000.0)
print(f"{datetime.now()} finished laser warmup")
def set_integration_time_ms(self, ms):
if ms < 1 or ms > 0xffff:
print("ERROR: integrationTimeMS requires positive uint16")
return
self.debug(f"setting integrationTimeMS to {ms}")
self.send_cmd(0xb2, ms)
def set_gain_db(self, db):
db = round(db, 1)
msb = int(db)
lsb = int((db - int(db)) * 10)
raw = (msb << 8) | lsb
self.debug("setting gainDB 0x%04x (FunkyFloat)" % raw)
self.send_cmd(0xb7, raw)
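# Example of the "FunkyFloat" encoding above (illustrative): 8.0 dB -> msb 8,
# lsb 0 -> raw 0x0800; 2.7 dB -> msb 2, lsb 7 -> raw 0x0207. The integer part
# of the gain rides in the high byte and the tenths in the low byte.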
def set_modulation_enable(self, flag):
self.debug(f"setting laserModulationEnable {flag}")
self.send_cmd(0xbd, 1 if flag else 0)
def set_raman_mode(self, flag):
self.debug(f"setting ramanMode {flag}")
self.send_cmd(0xff, 0x16, 1 if flag else 0)
def set_raman_delay_ms(self, ms):
if ms < 0 or ms > 0xffff:
print("ERROR: ramanDelay requires uint16")
return
self.debug(f"setting ramanDelay {ms} ms")
self.send_cmd(0xff, 0x20, ms)
def set_watchdog_sec(self, sec):
if sec < 0 or sec > 0xffff:
print("ERROR: laserWatchdog requires uint16")
return
self.debug(f"setting laserWatchdog {sec} sec")
self.send_cmd(0xff, 0x18, sec)
def get_averaged_spectrum(self):
spectrum = self.get_spectrum()
if spectrum is None or self.args.scans_to_average < 2:
return spectrum
for i in range(self.args.scans_to_average - 1):
tmp = self.get_spectrum()
if tmp is None:
return
for j in range(len(spectrum)):
spectrum[j] += tmp[j]
for i in range(len(spectrum)):
spectrum[i] = spectrum[i] / self.args.scans_to_average
return spectrum
def get_spectrum(self):
timeout_ms = TIMEOUT_MS + self.args.integration_time_ms * 2
self.send_cmd(0xad, 0)
data = self.device.read(0x82, self.pixels * 2, timeout=timeout_ms)
if data is None:
return
spectrum = []
for i in range(0, len(data), 2):
spectrum.append(data[i] | (data[i+1] << 8))
if len(spectrum) != self.pixels:
return
# stomp blank SiG pixels (first 3 and last)
for i in range(3):
spectrum[i] = spectrum[3]
spectrum[-1] = spectrum[-2]
# 2x2 binning
if self.args.bin2x2:
for i in range(self.pixels-1):
spectrum[i] = (spectrum[i] + spectrum[i+1]) / 2.0
return spectrum
## perc is a float (0.0, 100.0)
def set_laser_power_perc(self, perc):
value = float(max(0, min(100, perc)))
self.set_mod_enable(False)
if value >= 100:
return
if value < 0.1:
self.set_laser_enable(False)
return
period_us = 1000
width_us = int(round(1.0 * value * period_us / 100.0, 0)) # note value is in range (0, 100) not (0, 1)
width_us = max(1, min(width_us, period_us))
self.set_mod_period_us(period_us)
self.set_mod_width_us(width_us)
self.set_mod_enable(True)
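# Illustrative example: perc = 25.0 keeps the 1000 us modulation period and
# requests a 250 us pulse width, i.e. a 25% duty cycle. perc >= 100 leaves the
# laser in CW mode (modulation stays disabled), and perc < 0.1 simply disables
# the laser.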
def set_mod_enable(self, flag):
return self.send_cmd(0xbd, 1 if flag else 0)
def set_mod_period_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xc7, lsw, msw, buf)
def set_mod_width_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xdb, lsw, msw, buf)
############################################################################
# Utility Methods
############################################################################
def to40bit(self, us):
lsw = us & 0xffff
msw = (us >> 16) & 0xffff
buf = [(us >> 32) & 0xff] + [0] * 7  # high byte plus zero padding to 8 bytes
return (lsw, msw, buf)
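# Illustrative example: us = 0x0102030405 -> lsw = 0x0405 (low 16 bits),
# msw = 0x0203 (next 16 bits), buf[0] = 0x01 (bits 32-39); the remaining
# buffer bytes are zero padding for the 8-byte control transfer.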
def debug(self, msg):
if self.args.debug:
print(f"DEBUG: {msg}")
def send_cmd(self, cmd, value=0, index=0, buf=None):
if buf is None:
buf = [0] * 8
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x) >> %s" % (HOST_TO_DEVICE, cmd, value, index, buf))
self.device.ctrl_transfer(HOST_TO_DEVICE, cmd, value, index, buf, TIMEOUT_MS)
def get_cmd(self, cmd, value=0, index=0, length=64, lsb_len=None, msb_len=None, label=None):
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d)" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS))
result = self.device.ctrl_transfer(DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS)
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d) << %s" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS, result))
value = 0
if msb_len is not None:
for i in range(msb_len):
value = value << 8 | result[i]
return value
elif lsb_len is not None:
for i in range(lsb_len):
value = (result[i] << (8 * i)) | value
return value
else:
return result
def unpack(self, address, data_type):
page = address[0]
start_byte = address[1]
length = address[2]
end_byte = start_byte + length
buf = self.buffers[page]
if buf is None or end_byte > len(buf):
raise Exception("error unpacking EEPROM page %d, offset %d, len %d as %s: buf is %s" %
(page, start_byte, length, data_type, buf))
if data_type == "s":
result = ""
for c in buf[start_byte:end_byte]:
if c == 0:
break
result += chr(c)
else:
result = struct.unpack(data_type, buf[start_byte:end_byte])[0]
return result
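# Illustrative example: self.unpack((0, 16, 16), "s") reads 16 bytes starting
# at offset 16 of EEPROM page 0 and decodes them as a NUL-terminated string
# (the serial number), while self.unpack((2, 16, 2), "H") reads 2 bytes from
# page 2, offset 16, as an unsigned short (the pixel count).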
fixture = Fixture()
if fixture.device is not None:
fixture.run()
| print("*** not firing laser because --fire-laser not specified ***") | conditional_block |
collect-raman.py | #!/usr/bin/env python
# * read dark with a certain int time
# * turn laser on at a certain power level, does not need to be calibrated
# * read signal with the same int time
# * turn laser off
#
# The script then repeats this over and over.
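# Example invocation (illustrative only -- the option names come from the
# argparse setup below, but the values and output path are made up):
#
#   python collect-raman.py --integration-time-ms 100 --gain-db 8.0 \
#       --dark --fire-laser --laser-power-perc 50 --count 10 \
#       --outfile spectra.csv --plot
#
# Without --fire-laser the script still collects spectra but never enables the
# laser.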
import sys
import re
from time import sleep
from datetime import datetime
import matplotlib.pyplot as plt
import traceback
import usb.core
import argparse
import struct
import sys
HOST_TO_DEVICE = 0x40
DEVICE_TO_HOST = 0xC0
TIMEOUT_MS = 1000
MAX_PAGES = 8
PAGE_SIZE = 64
# An extensible, stateful "Test Fixture"
class Fixture(object):
############################################################################
# Lifecycle
############################################################################
def __init__(self):
self.outfile = None
self.device = None
self.dark = None
# parse cmd-line args
parser = argparse.ArgumentParser()
parser.add_argument("--bin2x2", action="store_true", help="apply 2x2 binning")
parser.add_argument("--count", type=int, help="read the given number of spectra (default 1)", default=1)
parser.add_argument("--dark", action="store_true", help="collect dark and perform dark correction")
parser.add_argument("--debug", action="store_true", help="debug output (default off)")
parser.add_argument("--delay-ms", type=int, help="delay n ms between spectra (default 10)", default=10)
parser.add_argument("--fire-laser", action="store_true", help="to avoid accidents, WILL NOT fire laser unless specified")
parser.add_argument("--gain-db", type=float, help="gain in dB (default 8.0)", default=8.0)
parser.add_argument("--integration-time-ms", type=int, help="integration time (ms) (default 100)", default=100)
parser.add_argument("--laser-power-perc", type=float, help="laser power as a percentage (range 0.1-100) (default 100)")
parser.add_argument("--laser-warmup-ms", type=int, help="laser warmup delay in ms (default 1000)", default=1000)
parser.add_argument("--outfile", type=str, help="outfile to save full spectra")
parser.add_argument("--plot", action="store_true", help="graph spectra after collection")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 1)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
print("No spectrometers found")
return
self.debug(device)
self.device = device
# claim device (I'm never sure when this is required)
if False:
self.debug("claiming spectrometer")
self.device.set_configuration(1)
usb.util.claim_interface(self.device, 0)
self.debug("claimed device")
# read configuration
self.fw_version = self.get_firmware_version()
self.fpga_version = self.get_fpga_version()
self.read_eeprom()
self.generate_wavelengths()
print(f"Connected to {self.model} {self.serial_number} with {self.pixels} pixels ({self.wavelengths[0]:.2f}, {self.wavelengths[-1]:.2f}nm) ({self.wavenumbers[0]:.2f}, {self.wavenumbers[-1]:.2f}cm-1)")
print(f"ARM {self.fw_version}, FPGA {self.fpga_version}")
def read_eeprom(self):
self.buffers = [self.get_cmd(0xff, 0x01, page) for page in range(8)]
# parse key fields (extend as needed)
self.format = self.unpack((0, 63, 1), "B")
self.model = self.unpack((0, 0, 16), "s")
self.serial_number = self.unpack((0, 16, 16), "s")
self.pixels = self.unpack((2, 16, 2), "H")
self.excitation_nm = self.unpack((3, 36, 4), "f")
self.wavecal_C0 = self.unpack((1, 0, 4), "f")
self.wavecal_C1 = self.unpack((1, 4, 4), "f")
self.wavecal_C2 = self.unpack((1, 8, 4), "f")
self.wavecal_C3 = self.unpack((1, 12, 4), "f")
# unsure whether SiG units receive a laser power calibration, but capturing these fields for when they do
self.laser_power_C0 = self.unpack((3, 12, 4), "f")
self.laser_power_C1 = self.unpack((3, 16, 4), "f")
self.laser_power_C2 = self.unpack((3, 20, 4), "f")
self.laser_power_C3 = self.unpack((3, 24, 4), "f")
self.max_laser_power_mW = self.unpack((3, 28, 4), "f") |
def generate_wavelengths(self):
self.wavelengths = []
self.wavenumbers = []
for i in range(self.pixels):
wavelength = self.wavecal_C0 \
+ self.wavecal_C1 * i \
+ self.wavecal_C2 * i * i \
+ self.wavecal_C3 * i * i * i
wavenumber = 1e7 / self.excitation_nm - 1e7 / wavelength
self.wavelengths.append(wavelength)
self.wavenumbers.append(wavenumber)
############################################################################
# Commands
############################################################################
def run(self):
# disable laser
self.set_laser_enable(False)
# set integration time
self.set_integration_time_ms(self.args.integration_time_ms)
# set gain dB
self.set_gain_db(self.args.gain_db)
# perform one throwaway (seems to help SiG)
self.get_spectrum()
# take dark
if self.args.dark:
print("taking dark")
self.dark = self.get_averaged_spectrum()
# open outfile
if self.args.outfile is not None:
self.outfile = open(self.args.outfile, 'w')
# header rows
self.outfile.write("pixel, %s\n" % (", ".join([str(x) for x in range(self.pixels)])))
self.outfile.write("wavelength, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavelengths])))
self.outfile.write("wavenumber, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavenumbers])))
# enable laser
if self.args.fire_laser:
if self.args.laser_power_perc is not None:
self.set_laser_power_perc(self.args.laser_power_perc)
self.set_laser_enable(True)
else:
print("*** not firing laser because --fire-laser not specified ***")
# take measurements
spectra = []
try:
for i in range(self.args.count):
# take dark-corrected measurement
spectrum = self.get_averaged_spectrum()
if self.dark is not None:
spectrum = [s - d for s, d in zip(spectrum, self.dark)]  # element-wise dark subtraction
spectra.append(spectrum)
# save measurement
now = datetime.now()
print("%s Spectrum %3d/%3d %s ..." % (now, i+1, self.args.count, spectrum[:10]))
if self.outfile is not None:
self.outfile.write("%s, %s\n" % (now, ", ".join([f"{x:.2f}" for x in spectrum])))
# delay before next
sleep(self.args.delay_ms / 1000.0 )
except:
print("caught exception reading spectra")
traceback.print_exc()
# disable laser
self.set_laser_enable(False)
# close file
if self.outfile is not None:
self.outfile.close()
# graph
if self.args.plot:
for a in spectra:
plt.plot(a)
plt.title(f"integration time {self.args.integration_time_ms}ms, gain {self.args.gain_db}dB, count {self.args.count}")
plt.show()
############################################################################
# opcodes
############################################################################
def get_firmware_version(self):
result = self.get_cmd(0xc0)
if result is not None and len(result) >= 4:
return "%d.%d.%d.%d" % (result[3], result[2], result[1], result[0])
def get_fpga_version(self):
s = ""
result = self.get_cmd(0xb4)
if result is not None:
for i in range(len(result)):
c = result[i]
if 0x20 <= c < 0x7f:
s += chr(c)
return s
def set_laser_enable(self, flag):
print(f"setting laserEnable {flag}")
self.send_cmd(0xbe, 1 if flag else 0)
if flag and self.args.laser_warmup_ms > 0:
print(f"{datetime.now()} starting laser warmup")
sleep(self.args.laser_warmup_ms / 1000.0)
print(f"{datetime.now()} finished laser warmup")
def set_integration_time_ms(self, ms):
if ms < 1 or ms > 0xffff:
print("ERROR: integrationTimeMS requires positive uint16")
return
self.debug(f"setting integrationTimeMS to {ms}")
self.send_cmd(0xb2, ms)
def set_gain_db(self, db):
db = round(db, 1)
msb = int(db)
lsb = int((db - int(db)) * 10)
raw = (msb << 8) | lsb
self.debug("setting gainDB 0x%04x (FunkyFloat)" % raw)
self.send_cmd(0xb7, raw)
def set_modulation_enable(self, flag):
self.debug(f"setting laserModulationEnable {flag}")
self.send_cmd(0xbd, 1 if flag else 0)
def set_raman_mode(self, flag):
self.debug(f"setting ramanMode {flag}")
self.send_cmd(0xff, 0x16, 1 if flag else 0)
def set_raman_delay_ms(self, ms):
if ms < 0 or ms > 0xffff:
print("ERROR: ramanDelay requires uint16")
return
self.debug(f"setting ramanDelay {ms} ms")
self.send_cmd(0xff, 0x20, ms)
def set_watchdog_sec(self, sec):
if sec < 0 or sec > 0xffff:
print("ERROR: laserWatchdog requires uint16")
return
self.debug(f"setting laserWatchdog {sec} sec")
self.send_cmd(0xff, 0x18, sec)
def get_averaged_spectrum(self):
spectrum = self.get_spectrum()
if spectrum is None or self.args.scans_to_average < 2:
return spectrum
for i in range(self.args.scans_to_average - 1):
tmp = self.get_spectrum()
if tmp is None:
return
for j in range(len(spectrum)):
spectrum[j] += tmp[j]
for i in range(len(spectrum)):
spectrum[i] = spectrum[i] / self.args.scans_to_average
return spectrum
def get_spectrum(self):
timeout_ms = TIMEOUT_MS + self.args.integration_time_ms * 2
self.send_cmd(0xad, 0)
data = self.device.read(0x82, self.pixels * 2, timeout=timeout_ms)
if data is None:
return
spectrum = []
for i in range(0, len(data), 2):
spectrum.append(data[i] | (data[i+1] << 8))
if len(spectrum) != self.pixels:
return
# stomp blank SiG pixels (first 3 and last)
for i in range(3):
spectrum[i] = spectrum[3]
spectrum[-1] = spectrum[-2]
# 2x2 binning
if self.args.bin2x2:
for i in range(self.pixels-1):
spectrum[i] = (spectrum[i] + spectrum[i+1]) / 2.0
return spectrum
## perc is a float (0.0, 100.0)
def set_laser_power_perc(self, perc):
value = float(max(0, min(100, perc)))
self.set_mod_enable(False)
if value >= 100:
return
if value < 0.1:
self.set_laser_enable(False)
return
period_us = 1000
width_us = int(round(1.0 * value * period_us / 100.0, 0)) # note value is in range (0, 100) not (0, 1)
width_us = max(1, min(width_us, period_us))
self.set_mod_period_us(period_us)
self.set_mod_width_us(width_us)
self.set_mod_enable(True)
def set_mod_enable(self, flag):
return self.send_cmd(0xbd, 1 if flag else 0)
def set_mod_period_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xc7, lsw, msw, buf)
def set_mod_width_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xdb, lsw, msw, buf)
############################################################################
# Utility Methods
############################################################################
def to40bit(self, us):
lsw = us & 0xffff
msw = (us >> 16) & 0xffff
buf = [(us >> 32) & 0xff] + [0] * 7  # high byte plus zero padding to 8 bytes
return (lsw, msw, buf)
def debug(self, msg):
if self.args.debug:
print(f"DEBUG: {msg}")
def send_cmd(self, cmd, value=0, index=0, buf=None):
if buf is None:
buf = [0] * 8
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x) >> %s" % (HOST_TO_DEVICE, cmd, value, index, buf))
self.device.ctrl_transfer(HOST_TO_DEVICE, cmd, value, index, buf, TIMEOUT_MS)
def get_cmd(self, cmd, value=0, index=0, length=64, lsb_len=None, msb_len=None, label=None):
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d)" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS))
result = self.device.ctrl_transfer(DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS)
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d) << %s" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS, result))
value = 0
if msb_len is not None:
for i in range(msb_len):
value = value << 8 | result[i]
return value
elif lsb_len is not None:
for i in range(lsb_len):
value = (result[i] << (8 * i)) | value
return value
else:
return result
def unpack(self, address, data_type):
page = address[0]
start_byte = address[1]
length = address[2]
end_byte = start_byte + length
buf = self.buffers[page]
if buf is None or end_byte > len(buf):
raise Exception("error unpacking EEPROM page %d, offset %d, len %d as %s: buf is %s" %
(page, start_byte, length, data_type, buf))
if data_type == "s":
result = ""
for c in buf[start_byte:end_byte]:
if c == 0:
break
result += chr(c)
else:
result = struct.unpack(data_type, buf[start_byte:end_byte])[0]
return result
fixture = Fixture()
if fixture.device is not None:
fixture.run() | self.min_laser_power_mW = self.unpack((3, 32, 4), "f") | random_line_split |
collect-raman.py | #!/usr/bin/env python
# * read dark with a certain int time
# * turn laser on at a certain power level, does not need to be calibrated
# * read signal with the same int time
# * turn laser off
#
# The script then repeats this over and over.
import sys
import re
from time import sleep
from datetime import datetime
import matplotlib.pyplot as plt
import traceback
import usb.core
import argparse
import struct
import sys
HOST_TO_DEVICE = 0x40
DEVICE_TO_HOST = 0xC0
TIMEOUT_MS = 1000
MAX_PAGES = 8
PAGE_SIZE = 64
# An extensible, stateful "Test Fixture"
class Fixture(object):
############################################################################
# Lifecycle
############################################################################
def __init__(self):
self.outfile = None
self.device = None
self.dark = None
# parse cmd-line args
parser = argparse.ArgumentParser()
parser.add_argument("--bin2x2", action="store_true", help="apply 2x2 binning")
parser.add_argument("--count", type=int, help="read the given number of spectra (default 1)", default=1)
parser.add_argument("--dark", action="store_true", help="collect dark and perform dark correction")
parser.add_argument("--debug", action="store_true", help="debug output (default off)")
parser.add_argument("--delay-ms", type=int, help="delay n ms between spectra (default 10)", default=10)
parser.add_argument("--fire-laser", action="store_true", help="to avoid accidents, WILL NOT fire laser unless specified")
parser.add_argument("--gain-db", type=float, help="gain in dB (default 8.0)", default=8.0)
parser.add_argument("--integration-time-ms", type=int, help="integration time (ms) (default 100)", default=100)
parser.add_argument("--laser-power-perc", type=float, help="laser power as a percentage (range 0.1-100) (default 100)")
parser.add_argument("--laser-warmup-ms", type=int, help="laser warmup delay in ms (default 1000)", default=1000)
parser.add_argument("--outfile", type=str, help="outfile to save full spectra")
parser.add_argument("--plot", action="store_true", help="graph spectra after collection")
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 1)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
print("No spectrometers found")
return
self.debug(device)
self.device = device
# claim device (I'm never sure when this is required)
if False:
self.debug("claiming spectrometer")
self.device.set_configuration(1)
usb.util.claim_interface(self.device, 0)
self.debug("claimed device")
# read configuration
self.fw_version = self.get_firmware_version()
self.fpga_version = self.get_fpga_version()
self.read_eeprom()
self.generate_wavelengths()
print(f"Connected to {self.model} {self.serial_number} with {self.pixels} pixels ({self.wavelengths[0]:.2f}, {self.wavelengths[-1]:.2f}nm) ({self.wavenumbers[0]:.2f}, {self.wavenumbers[-1]:.2f}cm-1)")
print(f"ARM {self.fw_version}, FPGA {self.fpga_version}")
def read_eeprom(self):
self.buffers = [self.get_cmd(0xff, 0x01, page) for page in range(8)]
# parse key fields (extend as needed)
self.format = self.unpack((0, 63, 1), "B")
self.model = self.unpack((0, 0, 16), "s")
self.serial_number = self.unpack((0, 16, 16), "s")
self.pixels = self.unpack((2, 16, 2), "H")
self.excitation_nm = self.unpack((3, 36, 4), "f")
self.wavecal_C0 = self.unpack((1, 0, 4), "f")
self.wavecal_C1 = self.unpack((1, 4, 4), "f")
self.wavecal_C2 = self.unpack((1, 8, 4), "f")
self.wavecal_C3 = self.unpack((1, 12, 4), "f")
# unsure whether SiG units receive a laser power calibration, but capturing these fields for when they do
self.laser_power_C0 = self.unpack((3, 12, 4), "f")
self.laser_power_C1 = self.unpack((3, 16, 4), "f")
self.laser_power_C2 = self.unpack((3, 20, 4), "f")
self.laser_power_C3 = self.unpack((3, 24, 4), "f")
self.max_laser_power_mW = self.unpack((3, 28, 4), "f")
self.min_laser_power_mW = self.unpack((3, 32, 4), "f")
def generate_wavelengths(self):
self.wavelengths = []
self.wavenumbers = []
for i in range(self.pixels):
wavelength = self.wavecal_C0 \
+ self.wavecal_C1 * i \
+ self.wavecal_C2 * i * i \
+ self.wavecal_C3 * i * i * i
wavenumber = 1e7 / self.excitation_nm - 1e7 / wavelength
self.wavelengths.append(wavelength)
self.wavenumbers.append(wavenumber)
############################################################################
# Commands
############################################################################
def run(self):
# disable laser
self.set_laser_enable(False)
# set integration time
self.set_integration_time_ms(self.args.integration_time_ms)
# set gain dB
self.set_gain_db(self.args.gain_db)
# perform one throwaway (seems to help SiG)
self.get_spectrum()
# take dark
if self.args.dark:
print("taking dark")
self.dark = self.get_averaged_spectrum()
# open outfile
if self.args.outfile is not None:
self.outfile = open(self.args.outfile, 'w')
# header rows
self.outfile.write("pixel, %s\n" % (", ".join([str(x) for x in range(self.pixels)])))
self.outfile.write("wavelength, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavelengths])))
self.outfile.write("wavenumber, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavenumbers])))
# enable laser
if self.args.fire_laser:
if self.args.laser_power_perc is not None:
self.set_laser_power_perc(self.args.laser_power_perc)
self.set_laser_enable(True)
else:
print("*** not firing laser because --fire-laser not specified ***")
# take measurements
spectra = []
try:
for i in range(self.args.count):
# take dark-corrected measurement
spectrum = self.get_averaged_spectrum()
if self.dark is not None:
spectrum = [s - d for s, d in zip(spectrum, self.dark)]  # element-wise dark subtraction
spectra.append(spectrum)
# save measurement
now = datetime.now()
print("%s Spectrum %3d/%3d %s ..." % (now, i+1, self.args.count, spectrum[:10]))
if self.outfile is not None:
self.outfile.write("%s, %s\n" % (now, ", ".join([f"{x:.2f}" for x in spectrum])))
# delay before next
sleep(self.args.delay_ms / 1000.0 )
except:
print("caught exception reading spectra")
traceback.print_exc()
# disable laser
self.set_laser_enable(False)
# close file
if self.outfile is not None:
self.outfile.close()
# graph
if self.args.plot:
for a in spectra:
plt.plot(a)
plt.title(f"integration time {self.args.integration_time_ms}ms, gain {self.args.gain_db}dB, count {self.args.count}")
plt.show()
############################################################################
# opcodes
############################################################################
def get_firmware_version(self):
result = self.get_cmd(0xc0)
if result is not None and len(result) >= 4:
return "%d.%d.%d.%d" % (result[3], result[2], result[1], result[0])
def get_fpga_version(self):
s = ""
result = self.get_cmd(0xb4)
if result is not None:
for i in range(len(result)):
c = result[i]
if 0x20 <= c < 0x7f:
s += chr(c)
return s
def set_laser_enable(self, flag):
print(f"setting laserEnable {flag}")
self.send_cmd(0xbe, 1 if flag else 0)
if flag and self.args.laser_warmup_ms > 0:
print(f"{datetime.now()} starting laser warmup")
sleep(self.args.laser_warmup_ms / 1000.0)
print(f"{datetime.now()} finished laser warmup")
def set_integration_time_ms(self, ms):
if ms < 1 or ms > 0xffff:
print("ERROR: integrationTimeMS requires positive uint16")
return
self.debug(f"setting integrationTimeMS to {ms}")
self.send_cmd(0xb2, ms)
def set_gain_db(self, db):
db = round(db, 1)
msb = int(db)
lsb = int((db - int(db)) * 10)
raw = (msb << 8) | lsb
self.debug("setting gainDB 0x%04x (FunkyFloat)" % raw)
self.send_cmd(0xb7, raw)
def set_modulation_enable(self, flag):
|
def set_raman_mode(self, flag):
self.debug(f"setting ramanMode {flag}")
self.send_cmd(0xff, 0x16, 1 if flag else 0)
def set_raman_delay_ms(self, ms):
if ms < 0 or ms > 0xffff:
print("ERROR: ramanDelay requires uint16")
return
self.debug(f"setting ramanDelay {ms} ms")
self.send_cmd(0xff, 0x20, ms)
def set_watchdog_sec(self, sec):
if sec < 0 or sec > 0xffff:
print("ERROR: laserWatchdog requires uint16")
return
self.debug(f"setting laserWatchdog {sec} sec")
self.send_cmd(0xff, 0x18, sec)
def get_averaged_spectrum(self):
spectrum = self.get_spectrum()
if spectrum is None or self.args.scans_to_average < 2:
return spectrum
for i in range(self.args.scans_to_average - 1):
tmp = self.get_spectrum()
if tmp is None:
return
for j in range(len(spectrum)):
spectrum[j] += tmp[j]
for i in range(len(spectrum)):
spectrum[i] = spectrum[i] / self.args.scans_to_average
return spectrum
def get_spectrum(self):
timeout_ms = TIMEOUT_MS + self.args.integration_time_ms * 2
self.send_cmd(0xad, 0)
data = self.device.read(0x82, self.pixels * 2, timeout=timeout_ms)
if data is None:
return
spectrum = []
for i in range(0, len(data), 2):
spectrum.append(data[i] | (data[i+1] << 8))
if len(spectrum) != self.pixels:
return
# stomp blank SiG pixels (first 3 and last)
for i in range(3):
spectrum[i] = spectrum[3]
spectrum[-1] = spectrum[-2]
# 2x2 binning
if self.args.bin2x2:
for i in range(self.pixels-1):
spectrum[i] = (spectrum[i] + spectrum[i+1]) / 2.0
return spectrum
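# Note (illustrative): each pixel arrives over bulk endpoint 0x82 as two bytes,
# little-endian, so data[i] = 0x34, data[i+1] = 0x12 decodes to 0x1234 (4660
# counts); the read length requested above is pixels * 2 bytes.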
## perc is a float (0.0, 100.0)
def set_laser_power_perc(self, perc):
value = float(max(0, min(100, perc)))
self.set_mod_enable(False)
if value >= 100:
return
if value < 0.1:
self.set_laser_enable(False)
return
period_us = 1000
width_us = int(round(1.0 * value * period_us / 100.0, 0)) # note value is in range (0, 100) not (0, 1)
width_us = max(1, min(width_us, period_us))
self.set_mod_period_us(period_us)
self.set_mod_width_us(width_us)
self.set_mod_enable(True)
def set_mod_enable(self, flag):
return self.send_cmd(0xbd, 1 if flag else 0)
def set_mod_period_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xc7, lsw, msw, buf)
def set_mod_width_us(self, us):
(lsw, msw, buf) = self.to40bit(us)
return self.send_cmd(0xdb, lsw, msw, buf)
############################################################################
# Utility Methods
############################################################################
def to40bit(self, us):
lsw = us & 0xffff
msw = (us >> 16) & 0xffff
buf = [(us >> 32) & 0xff] + [0] * 7  # high byte plus zero padding to 8 bytes
return (lsw, msw, buf)
def debug(self, msg):
if self.args.debug:
print(f"DEBUG: {msg}")
def send_cmd(self, cmd, value=0, index=0, buf=None):
if buf is None:
buf = [0] * 8
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x) >> %s" % (HOST_TO_DEVICE, cmd, value, index, buf))
self.device.ctrl_transfer(HOST_TO_DEVICE, cmd, value, index, buf, TIMEOUT_MS)
def get_cmd(self, cmd, value=0, index=0, length=64, lsb_len=None, msb_len=None, label=None):
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d)" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS))
result = self.device.ctrl_transfer(DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS)
self.debug("ctrl_transfer(0x%02x, 0x%02x, 0x%04x, 0x%04x, len %d, timeout %d) << %s" % (DEVICE_TO_HOST, cmd, value, index, length, TIMEOUT_MS, result))
value = 0
if msb_len is not None:
for i in range(msb_len):
value = value << 8 | result[i]
return value
elif lsb_len is not None:
for i in range(lsb_len):
value = (result[i] << (8 * i)) | value
return value
else:
return result
def unpack(self, address, data_type):
page = address[0]
start_byte = address[1]
length = address[2]
end_byte = start_byte + length
buf = self.buffers[page]
if buf is None or end_byte > len(buf):
raise Exception("error unpacking EEPROM page %d, offset %d, len %d as %s: buf is %s" %
(page, start_byte, length, data_type, buf))
if data_type == "s":
result = ""
for c in buf[start_byte:end_byte]:
if c == 0:
break
result += chr(c)
else:
result = struct.unpack(data_type, buf[start_byte:end_byte])[0]
return result
fixture = Fixture()
if fixture.device is not None:
fixture.run()
| self.debug(f"setting laserModulationEnable {flag}")
self.send_cmd(0xbd, 1 if flag else 0) | identifier_body |
MT3D_PP_viz.py | import os
import flopy
import matplotlib.pyplot as plt
import numpy as np
from HydroModelBuilder.Utilities.model_assessment import metric_me, metric_pbias, metric_rmse, plot_obs_vs_sim
def compareAllObs(self):
"""Compare simulated and observed C14 concentrations for every stress period,
then plot a residual histogram, a simulated-vs-observed scatter coloured by
zone, and the residuals in space."""
concobj = self.import_concs()
times = concobj.get_times()
scatterx = []
scattery = []
obs_sim_zone_all = []
# Each obs_sim_zone entry looks like [observed, simulated, zone, x, y]:
for i in range(self.mf_model.model_data.model_time.t['steps']):
conc = concobj.get_data(totim=times[i])
self.compare_observed('C14', conc, nper=i)
obs_sim_zone_all += self.obs_sim_zone
scatterx = np.array([h[0] for h in obs_sim_zone_all])
scattery = np.array([h[1] for h in obs_sim_zone_all])
# First step is to set up the plot
width = 20
height = 5
multiplier = 1.0
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
ax = fig.add_subplot(1, 3, 1)
ax.set_title('Residuals')
ax.hist([loc[0] - loc[1] for loc in obs_sim_zone_all], bins=20, alpha=0.5)
ax = fig.add_subplot(1, 3, 2)
ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in range(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
if col == nam:
rgb_ref += [rgb_all[index]]
# End if
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# copy the zone's reference colour into the RGB channels
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the fourth column needs to be your alphas
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
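# Added note: alpha is the absolute residual scaled by the largest residual, so
# opaque points mark the worst-fitting bores and nearly transparent points the
# best fits, while colour still encodes the hydrogeological zone.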
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
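# The metric_* helpers come from HydroModelBuilder and are not shown in this
# file. Their conventional definitions (assumed here for orientation only, with
# sim/obs array-like and matching the call order metric_me(scattery, scatterx)
# used above) are:
#   model efficiency ME = 1 - sum((obs - sim)^2) / sum((obs - mean(obs))^2)
#   PBIAS = 100 * sum(sim - obs) / sum(obs)
#   RMSE = sqrt(mean((sim - obs)^2))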
def viewConcsByZone(self, nper='all', specimen=None):
"""Plot zoned concentrations for each aquifer layer in a multi-panel figure.

:param nper: stress period to plot: 'all' averages over all time steps,
'final' takes the last time step, or pass an integer time-step index
(Default value = 'all')
:param specimen: species name; currently unused (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
min_conc = -100.0
max_conc = 100.0
temp = max_conc
max_conc = vmax
vmax = 100.0
array = modelmap.plot_array(
conc[0], masked_values=[-999.98999023, max_conc, min_conc], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, end = ax.get_xlim()
start = start // 1000 * 1000 + 1000
end = end // 1000 * 1000 - 1000
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax3 = fig.add_axes([0.19, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax3)
ax = fig.add_subplot(2, 4, 6, aspect='equal')
ax.set_title('Renmark')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[5], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax4 = fig.add_axes([0.43, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax4)
ax = fig.add_subplot(2, 4, 7, aspect='equal')
ax.set_title('Basement')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[6], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.0))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax5 = fig.add_axes([0.67, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax5)
fig.subplots_adjust(left=0.01, right=0.95, bottom=0.05, top=0.95, wspace=0.1, hspace=0.12)
plt.show()
# End viewConcsByZone()
def compareAllObs2(self, specimen):
|
# End compareAllObs()
| conc = None
sft_conc = None
obs_group = self.mf_model.model_data.observations.obs_group
obs_sim_zone_all = []
# Write observation to file
for obs_set in obs_group:
obs_sim_zone_all = []
obs_type = obs_group[obs_set]['obs_type']
# Import the required model outputs for processing
if obs_type not in ['concentration', 'EC', 'Radon']:
continue
else:
print("Processing {}".format(obs_set))
if (obs_type == 'concentration') & (specimen == 'C14'):
# Check if model outputs have already been imported and if not import
if not conc:
concobj = self.importConcs()
conc = concobj.get_alldata() # (totim=times[0])
# End if
elif (obs_type == 'EC') & (specimen == 'EC'):
try:
sft_conc['TIME']
except:
sft_conc = self.importSftConcs()
elif obs_type == 'Radon':
continue
else:
continue
# End if
# End if
obs_df = obs_group[obs_set]['time_series']
obs_df = obs_df[obs_df['active'] == True]
sim_map_dict = obs_group[obs_set]['mapped_observations']
if obs_group[obs_set]['domain'] == 'stream':
sft_location = obs_group[obs_set]['locations']['seg_loc']
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
seg = sft_location.loc[name]
sft = sft_conc
times = sft['TIME'].unique()
col_of_interest = obs_type
if obs_type == 'EC':
col_of_interest = 'SFR-CONCENTRATION'
sim_obs = sft[(sft['SFR-NODE'] == seg) &
(sft['TIME'] == times[interval])][col_of_interest].tolist()[0]
obs_sim_zone_all += [[obs, sim_obs, seg]]
if obs_group[obs_set]['domain'] == 'porous':
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
zone = obs_df['zone'].loc[observation]
(lay, row, col) = [sim_map_dict[name][0],
sim_map_dict[name][1], sim_map_dict[name][2]]
sim_conc = [conc[interval][lay][row][col]]
sim_obs = np.mean(sim_conc)
obs_sim_zone_all += [[obs, sim_obs, zone]]
# End for
# End if
plot_obs_vs_sim(obs_set, obs_sim_zone_all, unc=2) | identifier_body |
MT3D_PP_viz.py | import os
import flopy
import matplotlib.pyplot as plt
import numpy as np
from HydroModelBuilder.Utilities.model_assessment import metric_me, metric_pbias, metric_rmse, plot_obs_vs_sim
def | (self):
"""TODO: Docs"""
concobj = self.import_concs()
times = concobj.get_times()
scatterx = []
scattery = []
obs_sim_zone_all = []
# Each obs_sim_zone entry looks like [observed, simulated, zone, x, y]:
for i in range(self.mf_model.model_data.model_time.t['steps']):
conc = concobj.get_data(totim=times[i])
self.compare_observed('C14', conc, nper=i)
obs_sim_zone_all += self.obs_sim_zone
scatterx = np.array([h[0] for h in obs_sim_zone_all])
scattery = np.array([h[1] for h in obs_sim_zone_all])
# First step is to set up the plot
width = 20
height = 5
multiplier = 1.0
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
ax = fig.add_subplot(1, 3, 1)
ax.set_title('Residuals')
ax.hist([loc[0] - loc[1] for loc in obs_sim_zone_all], bins=20, alpha=0.5)
ax = fig.add_subplot(1, 3, 2)
ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in range(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
if col == nam:
rgb_ref += [rgb_all[index]]
# End if
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# copy the zone's reference colour into the RGB channels
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the fourth column needs to be your alphas
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
def viewConcsByZone(self, nper='all', specimen=None):
"""Plot zoned concentrations for each aquifer layer in a multi-panel figure.

:param nper: stress period to plot: 'all' averages over all time steps,
'final' takes the last time step, or pass an integer time-step index
(Default value = 'all')
:param specimen: species name; currently unused (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
min_conc = -100.0
max_conc = 100.0
temp = max_conc
max_conc = vmax
vmax = 100.0
array = modelmap.plot_array(
conc[0], masked_values=[-999.98999023, max_conc, min_conc], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, end = ax.get_xlim()
start = start // 1000 * 1000 + 1000
end = end // 1000 * 1000 - 1000
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax3 = fig.add_axes([0.19, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax3)
ax = fig.add_subplot(2, 4, 6, aspect='equal')
ax.set_title('Renmark')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[5], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax4 = fig.add_axes([0.43, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax4)
ax = fig.add_subplot(2, 4, 7, aspect='equal')
ax.set_title('Basement')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[6], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.0))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax5 = fig.add_axes([0.67, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax5)
fig.subplots_adjust(left=0.01, right=0.95, bottom=0.05, top=0.95, wspace=0.1, hspace=0.12)
plt.show()
# End viewConcsByZone()
def compareAllObs2(self, specimen):
conc = None
sft_conc = None
obs_group = self.mf_model.model_data.observations.obs_group
obs_sim_zone_all = []
# Write observation to file
for obs_set in obs_group:
obs_sim_zone_all = []
obs_type = obs_group[obs_set]['obs_type']
# Import the required model outputs for processing
if obs_type not in ['concentration', 'EC', 'Radon']:
continue
else:
print("Processing {}".format(obs_set))
if (obs_type == 'concentration') & (specimen == 'C14'):
# Check if model outputs have already been imported and if not import
if not conc:
concobj = self.importConcs()
conc = concobj.get_alldata() # (totim=times[0])
# End if
elif (obs_type == 'EC') & (specimen == 'EC'):
try:
sft_conc['TIME']
except:
sft_conc = self.importSftConcs()
elif obs_type == 'Radon':
continue
else:
continue
# End if
# End if
obs_df = obs_group[obs_set]['time_series']
obs_df = obs_df[obs_df['active'] == True]
sim_map_dict = obs_group[obs_set]['mapped_observations']
if obs_group[obs_set]['domain'] == 'stream':
sft_location = obs_group[obs_set]['locations']['seg_loc']
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
seg = sft_location.loc[name]
sft = sft_conc
times = sft['TIME'].unique()
col_of_interest = obs_type
if obs_type == 'EC':
col_of_interest = 'SFR-CONCENTRATION'
sim_obs = sft[(sft['SFR-NODE'] == seg) &
(sft['TIME'] == times[interval])][col_of_interest].tolist()[0]
obs_sim_zone_all += [[obs, sim_obs, seg]]
if obs_group[obs_set]['domain'] == 'porous':
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
zone = obs_df['zone'].loc[observation]
(lay, row, col) = [sim_map_dict[name][0],
sim_map_dict[name][1], sim_map_dict[name][2]]
sim_conc = [conc[interval][lay][row][col]]
sim_obs = np.mean(sim_conc)
obs_sim_zone_all += [[obs, sim_obs, zone]]
# End for
# End if
plot_obs_vs_sim(obs_set, obs_sim_zone_all, unc=2)
# End compareAllObs()
| compareAllObs | identifier_name |
MT3D_PP_viz.py | import os
import flopy
import matplotlib.pyplot as plt
import numpy as np
from HydroModelBuilder.Utilities.model_assessment import metric_me, metric_pbias, metric_rmse, plot_obs_vs_sim
def compareAllObs(self):
"""Compare simulated and observed C14 concentrations for every stress period,
then plot a residual histogram, a simulated-vs-observed scatter coloured by
zone, and the residuals in space."""
concobj = self.import_concs()
times = concobj.get_times()
scatterx = []
scattery = []
obs_sim_zone_all = []
# Each obs_sim_zone entry looks like [observed, simulated, zone, x, y]:
for i in range(self.mf_model.model_data.model_time.t['steps']):
conc = concobj.get_data(totim=times[i])
self.compare_observed('C14', conc, nper=i)
obs_sim_zone_all += self.obs_sim_zone
scatterx = np.array([h[0] for h in obs_sim_zone_all])
scattery = np.array([h[1] for h in obs_sim_zone_all])
# First step is to set up the plot
width = 20
height = 5
multiplier = 1.0
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
ax = fig.add_subplot(1, 3, 1)
ax.set_title('Residuals')
ax.hist([loc[0] - loc[1] for loc in obs_sim_zone_all], bins=20, alpha=0.5) | ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in range(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
if col == nam:
rgb_ref += [rgb_all[index]]
# End if
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# copy the zone's reference colour into the RGB channels
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the fourth column needs to be your alphas
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
def viewConcsByZone(self, nper='all', specimen=None):
"""Plot zoned concentrations for each aquifer layer in a multi-panel figure.

:param nper: stress period to plot: 'all' averages over all time steps,
'final' takes the last time step, or pass an integer time-step index
(Default value = 'all')
:param specimen: species name; currently unused (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
min_conc = -100.0
max_conc = 100.0
temp = max_conc
max_conc = vmax
vmax = 100.0
array = modelmap.plot_array(
conc[0], masked_values=[-999.98999023, max_conc, min_conc], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, end = ax.get_xlim()
start = start // 1000 * 1000 + 1000
end = end // 1000 * 1000 - 1000
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax3 = fig.add_axes([0.19, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax3)
ax = fig.add_subplot(2, 4, 6, aspect='equal')
ax.set_title('Renmark')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[5], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax4 = fig.add_axes([0.43, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax4)
ax = fig.add_subplot(2, 4, 7, aspect='equal')
ax.set_title('Basement')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[6], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.0))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax5 = fig.add_axes([0.67, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax5)
fig.subplots_adjust(left=0.01, right=0.95, bottom=0.05, top=0.95, wspace=0.1, hspace=0.12)
plt.show()
# End viewConcsByZone()
def compareAllObs2(self, specimen):
conc = None
sft_conc = None
obs_group = self.mf_model.model_data.observations.obs_group
obs_sim_zone_all = []
# Write observation to file
for obs_set in obs_group:
obs_sim_zone_all = []
obs_type = obs_group[obs_set]['obs_type']
# Import the required model outputs for processing
if obs_type not in ['concentration', 'EC', 'Radon']:
continue
else:
print("Processing {}".format(obs_set))
if (obs_type == 'concentration') & (specimen == 'C14'):
# Check if model outputs have already been imported and if not import
if conc is None:
concobj = self.importConcs()
conc = concobj.get_alldata() # (totim=times[0])
# End if
elif (obs_type == 'EC') & (specimen == 'EC'):
try:
sft_conc['TIME']
except (TypeError, KeyError):
sft_conc = self.importSftConcs()
elif obs_type == 'Radon':
continue
else:
continue
# End if
# End if
obs_df = obs_group[obs_set]['time_series']
obs_df = obs_df[obs_df['active'] == True]
sim_map_dict = obs_group[obs_set]['mapped_observations']
if obs_group[obs_set]['domain'] == 'stream':
sft_location = obs_group[obs_set]['locations']['seg_loc']
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
seg = sft_location.loc[name]
sft = sft_conc
times = sft['TIME'].unique()
col_of_interest = obs_type
if obs_type == 'EC':
col_of_interest = 'SFR-CONCENTRATION'
sim_obs = sft[(sft['SFR-NODE'] == seg) &
(sft['TIME'] == times[interval])][col_of_interest].tolist()[0]
obs_sim_zone_all += [[obs, sim_obs, seg]]
if obs_group[obs_set]['domain'] == 'porous':
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
zone = obs_df['zone'].loc[observation]
(lay, row, col) = [sim_map_dict[name][0],
sim_map_dict[name][1], sim_map_dict[name][2]]
sim_conc = [conc[interval][lay][row][col]]
sim_obs = np.mean(sim_conc)
obs_sim_zone_all += [[obs, sim_obs, zone]]
# End for
# End if
plot_obs_vs_sim(obs_set, obs_sim_zone_all, unc=2)
# End compareAllObs() |
ax = fig.add_subplot(1, 3, 2) | random_line_split |
MT3D_PP_viz.py | import os
import flopy
import matplotlib.pyplot as plt
import numpy as np
from HydroModelBuilder.Utilities.model_assessment import metric_me, metric_pbias, metric_rmse, plot_obs_vs_sim
def compareAllObs(self):
"""TODO: Docs"""
concobj = self.import_concs()
times = concobj.get_times()
scatterx = []
scattery = []
obs_sim_zone_all = []
# Each entry of obs_sim_zone looks like: [observed, simulated, zone, x, y]
for i in range(self.mf_model.model_data.model_time.t['steps']):
conc = concobj.get_data(totim=times[i])
self.compare_observed('C14', conc, nper=i)
obs_sim_zone_all += self.obs_sim_zone
scatterx = np.array([h[0] for h in obs_sim_zone_all])
scattery = np.array([h[1] for h in obs_sim_zone_all])
# First step is to set up the plot
width = 20
height = 5
multiplier = 1.0
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
ax = fig.add_subplot(1, 3, 1)
ax.set_title('Residuals')
ax.hist([loc[0] - loc[1] for loc in obs_sim_zone_all], bins=20, alpha=0.5)
ax = fig.add_subplot(1, 3, 2)
ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in range(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
|
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# for red the first column needs to be one
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the fourth column needs to be your alphas
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
def viewConcsByZone(self, nper='all', specimen=None):
"""
:param nper: (Default value = 'all')
:param specimen: (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
min_conc = -100.0
max_conc = 100.0
temp = max_conc
max_conc = vmax
vmax = 100.0
array = modelmap.plot_array(
conc[0], masked_values=[-999.98999023, max_conc, min_conc], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, end = ax.get_xlim()
start = start // 1000 * 1000 + 1000
end = end // 1000 * 1000 - 1000
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax3 = fig.add_axes([0.19, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax3)
ax = fig.add_subplot(2, 4, 6, aspect='equal')
ax.set_title('Renmark')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[5], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax4 = fig.add_axes([0.43, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax4)
ax = fig.add_subplot(2, 4, 7, aspect='equal')
ax.set_title('Basement')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[6], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.0))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax5 = fig.add_axes([0.67, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax5)
fig.subplots_adjust(left=0.01, right=0.95, bottom=0.05, top=0.95, wspace=0.1, hspace=0.12)
plt.show()
# End viewConcsByZone()
def compareAllObs2(self, specimen):
conc = None
sft_conc = None
obs_group = self.mf_model.model_data.observations.obs_group
obs_sim_zone_all = []
# Write observation to file
for obs_set in obs_group:
obs_sim_zone_all = []
obs_type = obs_group[obs_set]['obs_type']
# Import the required model outputs for processing
if obs_type not in ['concentration', 'EC', 'Radon']:
continue
else:
print("Processing {}".format(obs_set))
if (obs_type == 'concentration') & (specimen == 'C14'):
# Check if model outputs have already been imported and if not import
if conc is None:
concobj = self.importConcs()
conc = concobj.get_alldata() # (totim=times[0])
# End if
elif (obs_type == 'EC') & (specimen == 'EC'):
try:
sft_conc['TIME']
except (TypeError, KeyError):
sft_conc = self.importSftConcs()
elif obs_type == 'Radon':
continue
else:
continue
# End if
# End if
obs_df = obs_group[obs_set]['time_series']
obs_df = obs_df[obs_df['active'] == True]
sim_map_dict = obs_group[obs_set]['mapped_observations']
if obs_group[obs_set]['domain'] == 'stream':
sft_location = obs_group[obs_set]['locations']['seg_loc']
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
seg = sft_location.loc[name]
sft = sft_conc
times = sft['TIME'].unique()
col_of_interest = obs_type
if obs_type == 'EC':
col_of_interest = 'SFR-CONCENTRATION'
sim_obs = sft[(sft['SFR-NODE'] == seg) &
(sft['TIME'] == times[interval])][col_of_interest].tolist()[0]
obs_sim_zone_all += [[obs, sim_obs, seg]]
if obs_group[obs_set]['domain'] == 'porous':
for observation in obs_df.index:
interval = int(obs_df['interval'].loc[observation])
name = obs_df['name'].loc[observation]
obs = obs_df['value'].loc[observation]
zone = obs_df['zone'].loc[observation]
(lay, row, col) = [sim_map_dict[name][0],
sim_map_dict[name][1], sim_map_dict[name][2]]
sim_conc = [conc[interval][lay][row][col]]
sim_obs = np.mean(sim_conc)
obs_sim_zone_all += [[obs, sim_obs, zone]]
# End for
# End if
plot_obs_vs_sim(obs_set, obs_sim_zone_all, unc=2)
# End compareAllObs()
| if col == nam:
rgb_ref += [rgb_all[index]]
# End if | conditional_block |
linebreak.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) INFTY MULT accum breakwords linebreak linebreaking linebreaks linelen maxlength minlength nchars ostream overlen parasplit plass posn powf punct signum slen sstart tabwidth tlen underlen winfo wlen wordlen
use std::cmp;
use std::i64;
use std::io::{BufWriter, Stdout, Write};
use std::mem;
use uucore::crash;
use crate::parasplit::{ParaWords, Paragraph, WordInfo};
use crate::FmtOptions;
struct BreakArgs<'a> {
opts: &'a FmtOptions,
init_len: usize,
indent_str: &'a str,
indent_len: usize,
uniform: bool,
ostream: &'a mut BufWriter<Stdout>,
}
impl<'a> BreakArgs<'a> {
fn compute_width(&self, winfo: &WordInfo, posn: usize, fresh: bool) -> usize {
if fresh {
0
} else {
let post = winfo.after_tab;
match winfo.before_tab {
None => post,
Some(pre) => {
post + ((pre + posn) / self.opts.tabwidth + 1) * self.opts.tabwidth - posn
}
}
}
}
}
pub fn break_lines(
para: &Paragraph,
opts: &FmtOptions,
ostream: &mut BufWriter<Stdout>,
) -> std::io::Result<()> {
// indent
let p_indent = ¶.indent_str[..];
let p_indent_len = para.indent_len;
// words
let p_words = ParaWords::new(opts, para);
let mut p_words_words = p_words.words();
// the first word will *always* appear on the first line
// make sure of this here
let (w, w_len) = match p_words_words.next() {
Some(winfo) => (winfo.word, winfo.word_nchars),
None => {
return ostream.write_all(b"\n");
}
};
// print the init, if it exists, and get its length
let p_init_len = w_len
+ if opts.crown || opts.tagged {
// handle "init" portion
ostream.write_all(para.init_str.as_bytes())?;
para.init_len
} else if !para.mail_header {
// for non-(crown, tagged) that's the same as a normal indent
ostream.write_all(p_indent.as_bytes())?;
p_indent_len
} else {
// except that mail headers get no indent at all
0
};
// write first word after writing init
ostream.write_all(w.as_bytes())?;
// does this paragraph require uniform spacing?
let uniform = para.mail_header || opts.uniform;
let mut break_args = BreakArgs {
opts,
init_len: p_init_len,
indent_str: p_indent,
indent_len: p_indent_len,
uniform,
ostream,
};
if opts.quick || para.mail_header {
break_simple(p_words_words, &mut break_args)
} else {
break_knuth_plass(p_words_words, &mut break_args)
}
}
// break_simple implements a "greedy" breaking algorithm: print words until
// maxlength would be exceeded, then print a linebreak and indent and continue.
fn break_simple<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
iter.try_fold((args.init_len, false), |l, winfo| {
accum_words_simple(args, l, winfo)
})?;
args.ostream.write_all(b"\n")
}
fn accum_words_simple<'a>(
args: &mut BreakArgs<'a>,
(l, prev_punct): (usize, bool),
winfo: &'a WordInfo<'a>,
) -> std::io::Result<(usize, bool)> {
// compute the length of this word, considering how tabs will expand at this position on the line
let wlen = winfo.word_nchars + args.compute_width(winfo, l, false);
let slen = compute_slen(
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
if l + wlen + slen > args.opts.width {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
Ok((args.indent_len + winfo.word_nchars, winfo.ends_punct))
} else {
write_with_spaces(winfo.word, slen, args.ostream)?;
Ok((l + wlen + slen, winfo.ends_punct))
}
}
// break_knuth_plass implements an "optimal" breaking algorithm in the style of
// Knuth, D.E., and Plass, M.F. "Breaking Paragraphs into Lines." in Software,
// Practice and Experience. Vol. 11, No. 11, November 1981.
// http://onlinelibrary.wiley.com/doi/10.1002/spe.4380111102/pdf
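// In short: every feasible break becomes a candidate path, each candidate line is
// scored with "demerits" (see compute_demerits below), and only breaks that can
// still start a valid line stay active, so the chosen breakpoints minimise the
// accumulated demerits over the whole paragraph.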
fn break_knuth_plass<'a, T: Clone + Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
// run the algorithm to get the breakpoints
let breakpoints = find_kp_breakpoints(iter.clone(), args);
// iterate through the breakpoints (note that breakpoints is in reverse break order, so we .rev() it)
let result: std::io::Result<(bool, bool)> = breakpoints.iter().rev().try_fold(
(false, false),
|(mut prev_punct, mut fresh), &(next_break, break_before)| {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
// at each breakpoint, keep emitting words until we find the word matching this breakpoint
for winfo in &mut iter {
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
fresh = false;
prev_punct = winfo.ends_punct;
// We find identical breakpoints here by comparing addresses of the references.
// This is OK because the backing vector is not mutating once we are linebreaking.
let winfo_ptr = winfo as *const _;
let next_break_ptr = next_break as *const _;
if winfo_ptr == next_break_ptr {
// OK, we found the matching word
if break_before {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
} else {
// breaking after this word, so that means "fresh" is true for the next iteration
write_with_spaces(word, slen, args.ostream)?;
fresh = true;
}
break;
} else {
write_with_spaces(word, slen, args.ostream)?;
}
}
Ok((prev_punct, fresh))
},
);
let (mut prev_punct, mut fresh) = result?;
// after the last linebreak, write out the rest of the final line.
for winfo in iter {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
prev_punct = winfo.ends_punct;
fresh = false;
write_with_spaces(word, slen, args.ostream)?;
}
args.ostream.write_all(b"\n")
}
struct LineBreak<'a> {
prev: usize,
linebreak: Option<&'a WordInfo<'a>>,
break_before: bool,
demerits: i64,
prev_rat: f32,
length: usize,
fresh: bool,
}
#[allow(clippy::cognitive_complexity)]
fn find_kp_breakpoints<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
iter: T,
args: &BreakArgs<'a>,
) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut iter = iter.peekable();
// set up the initial null linebreak
let mut linebreaks = vec![LineBreak {
prev: 0,
linebreak: None,
break_before: false,
demerits: 0,
prev_rat: 0.0f32,
length: args.init_len,
fresh: false,
}];
// this vec holds the current active linebreaks; next_ holds the breaks that will be active for
// the next word
let active_breaks = &mut vec![0];
let next_active_breaks = &mut vec![];
let stretch = (args.opts.width - args.opts.goal) as isize;
let minlength = args.opts.goal - stretch as usize;
let mut new_linebreaks = vec![];
let mut is_sentence_start = false;
let mut least_demerits = 0;
loop {
let w = match iter.next() {
None => break,
Some(w) => w,
};
// if this is the last word, we don't add additional demerits for this break
let (is_last_word, is_sentence_end) = match iter.peek() {
None => (true, true),
Some(&&WordInfo {
sentence_start: st,
new_line: nl,
..
}) => (false, st || (nl && w.ends_punct)),
};
// should we be adding extra space at the beginning of the next sentence?
let slen = compute_slen(args.uniform, w.new_line, is_sentence_start, false);
let mut ld_new = i64::MAX;
let mut ld_next = i64::MAX;
let mut ld_idx = 0;
new_linebreaks.clear();
next_active_breaks.clear();
// go through each active break, extending it and possibly adding a new active
// break if we are above the minimum required length
#[allow(clippy::explicit_iter_loop)]
for &i in active_breaks.iter() {
let active = &mut linebreaks[i];
// normalize demerits to avoid overflow, and record if this is the least
active.demerits -= least_demerits;
if active.demerits < ld_next {
ld_next = active.demerits;
ld_idx = i;
}
// get the new length
let tlen = w.word_nchars
+ args.compute_width(w, active.length, active.fresh)
+ slen
+ active.length;
// if tlen is longer than args.opts.width, we drop this break from the active list
// otherwise, we extend the break, and possibly add a new break at this point
if tlen <= args.opts.width {
// this break will still be active next time
next_active_breaks.push(i);
// we can put this word on this line
active.fresh = false;
active.length = tlen;
// if we're above the minlength, we can also consider breaking here
if tlen >= minlength {
let (new_demerits, new_ratio) = if is_last_word {
// there is no penalty for the final line's length
(0, 0.0)
} else {
compute_demerits(
args.opts.goal as isize - tlen as isize,
stretch,
w.word_nchars as isize,
active.prev_rat,
)
};
// do not even consider adding a line that has too many demerits
// also, try to detect overflow by checking signum
let total_demerits = new_demerits + active.demerits;
if new_demerits < BAD_INFTY_SQ
&& total_demerits < ld_new
&& active.demerits.signum() <= new_demerits.signum()
{
ld_new = total_demerits;
new_linebreaks.push(LineBreak {
prev: i,
linebreak: Some(w),
break_before: false,
demerits: total_demerits,
prev_rat: new_ratio,
length: args.indent_len,
fresh: true,
});
}
}
}
}
// if we generated any new linebreaks, add the last one to the list
// the last one is always the best because we don't add to new_linebreaks unless
// it's better than the best one so far
match new_linebreaks.pop() {
None => (),
Some(lb) => {
next_active_breaks.push(linebreaks.len());
linebreaks.push(lb);
}
}
if next_active_breaks.is_empty() {
// every potential linebreak is too long! choose the linebreak with the least demerits, ld_idx
let new_break =
restart_active_breaks(args, &linebreaks[ld_idx], ld_idx, w, slen, minlength);
next_active_breaks.push(linebreaks.len());
linebreaks.push(new_break);
least_demerits = 0;
} else {
// next time around, normalize out the demerits fields
// on active linebreaks to make overflow less likely
least_demerits = cmp::max(ld_next, 0);
}
// swap in new list of active breaks
mem::swap(active_breaks, next_active_breaks);
// If this was the last word in a sentence, the next one must be the first in the next.
is_sentence_start = is_sentence_end;
}
// return the best path
build_best_path(&linebreaks, active_breaks)
}
fn | <'a>(paths: &[LineBreak<'a>], active: &[usize]) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut breakwords = vec![];
// of the active paths, we select the one with the fewest demerits
let mut best_idx = match active.iter().min_by_key(|&&a| paths[a].demerits) {
None => crash!(
1,
"Failed to find a k-p linebreak solution. This should never happen."
),
Some(&s) => s,
};
// now, chase the pointers back through the break list, recording
// the words at which we should break
loop {
let next_best = &paths[best_idx];
match next_best.linebreak {
None => return breakwords,
Some(prev) => {
breakwords.push((prev, next_best.break_before));
best_idx = next_best.prev;
}
}
}
}
// "infinite" badness is more like (1+BAD_INFTY)^2 because of how demerits are computed
const BAD_INFTY: i64 = 10_000_000;
const BAD_INFTY_SQ: i64 = BAD_INFTY * BAD_INFTY;
// badness = BAD_MULT * abs(r) ^ 3
const BAD_MULT: f32 = 100.0;
// DR_MULT is multiplier for delta-R between lines
const DR_MULT: f32 = 600.0;
// DL_MULT is penalty multiplier for short words at end of line
const DL_MULT: f32 = 300.0;
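// Worked example (illustrative numbers, not taken from the code): with goal = 65 and
// width = 75, stretch = 10. A non-final line that ends 5 columns short has
// ratio = 5/10 = 0.5, so bad_linelen = (100.0 * 0.5^3) as i64 = 12; a 12-char final
// word (>= stretch) gives bad_wordlen = 0, and with prev_rat = 0.0 the delta-R
// penalty is (600.0 * 0.25^3) as i64 = 9, so demerits = (1 + 12 + 0 + 9)^2 = 484.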
fn compute_demerits(delta_len: isize, stretch: isize, wlen: isize, prev_rat: f32) -> (i64, f32) {
// how much stretch are we using?
let ratio = if delta_len == 0 {
0.0f32
} else {
delta_len as f32 / stretch as f32
};
// compute badness given the stretch ratio
let bad_linelen = if ratio.abs() > 1.0f32 {
BAD_INFTY
} else {
(BAD_MULT * ratio.powi(3).abs()) as i64
};
// we penalize lines ending in really short words
let bad_wordlen = if wlen >= stretch {
0
} else {
(DL_MULT
* ((stretch - wlen) as f32 / (stretch - 1) as f32)
.powi(3)
.abs()) as i64
};
// we penalize lines that have very different ratios from previous lines
let bad_delta_r = (DR_MULT * (((ratio - prev_rat) / 2.0).powi(3)).abs()) as i64;
let demerits = i64::pow(1 + bad_linelen + bad_wordlen + bad_delta_r, 2);
(demerits, ratio)
}
fn restart_active_breaks<'a>(
args: &BreakArgs<'a>,
active: &LineBreak<'a>,
act_idx: usize,
w: &'a WordInfo<'a>,
slen: usize,
min: usize,
) -> LineBreak<'a> {
let (break_before, line_length) = if active.fresh {
// never break before a word if that word would be the first on a line
(false, args.indent_len)
} else {
// choose the lesser evil: breaking too early, or breaking too late
let wlen = w.word_nchars + args.compute_width(w, active.length, active.fresh);
let underlen = (min - active.length) as isize;
let overlen = ((wlen + slen + active.length) - args.opts.width) as isize;
if overlen > underlen {
// break early, put this word on the next line
(true, args.indent_len + w.word_nchars)
} else {
(false, args.indent_len)
}
};
// restart the linebreak. This will be our only active path.
LineBreak {
prev: act_idx,
linebreak: Some(w),
break_before,
demerits: 0, // this is the only active break, so we can reset the demerit count
prev_rat: if break_before { 1.0 } else { -1.0 },
length: line_length,
fresh: !break_before,
}
}
// Number of spaces to add before a word, based on mode, newline, sentence start.
fn compute_slen(uniform: bool, newline: bool, start: bool, punct: bool) -> usize {
if uniform || newline {
if start || (newline && punct) {
2
} else {
1
}
} else {
0
}
}
// If we're on a fresh line, slen=0 and we slice off leading whitespace.
// Otherwise, compute slen and leave whitespace alone.
fn slice_if_fresh(
fresh: bool,
word: &str,
start: usize,
uniform: bool,
newline: bool,
sstart: bool,
punct: bool,
) -> (usize, &str) {
if fresh {
(0, &word[start..])
} else {
(compute_slen(uniform, newline, sstart, punct), word)
}
}
// Write a newline and add the indent.
fn write_newline(indent: &str, ostream: &mut BufWriter<Stdout>) -> std::io::Result<()> {
ostream.write_all(b"\n")?;
ostream.write_all(indent.as_bytes())
}
// Write the word, along with slen spaces.
fn write_with_spaces(
word: &str,
slen: usize,
ostream: &mut BufWriter<Stdout>,
) -> std::io::Result<()> {
if slen == 2 {
ostream.write_all(b" ")?;
} else if slen == 1 {
ostream.write_all(b" ")?;
}
ostream.write_all(word.as_bytes())
}
| build_best_path | identifier_name |
linebreak.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) INFTY MULT accum breakwords linebreak linebreaking linebreaks linelen maxlength minlength nchars ostream overlen parasplit plass posn powf punct signum slen sstart tabwidth tlen underlen winfo wlen wordlen
use std::cmp;
use std::i64;
use std::io::{BufWriter, Stdout, Write};
use std::mem;
use uucore::crash;
use crate::parasplit::{ParaWords, Paragraph, WordInfo};
use crate::FmtOptions;
struct BreakArgs<'a> {
opts: &'a FmtOptions,
init_len: usize,
indent_str: &'a str,
indent_len: usize,
uniform: bool,
ostream: &'a mut BufWriter<Stdout>,
}
impl<'a> BreakArgs<'a> {
fn compute_width(&self, winfo: &WordInfo, posn: usize, fresh: bool) -> usize {
if fresh {
0
} else {
let post = winfo.after_tab;
match winfo.before_tab {
None => post,
Some(pre) => {
post + ((pre + posn) / self.opts.tabwidth + 1) * self.opts.tabwidth - posn
}
}
}
}
}
pub fn break_lines(
para: &Paragraph,
opts: &FmtOptions,
ostream: &mut BufWriter<Stdout>,
) -> std::io::Result<()> {
// indent
let p_indent = ¶.indent_str[..];
let p_indent_len = para.indent_len;
// words
let p_words = ParaWords::new(opts, para);
let mut p_words_words = p_words.words();
// the first word will *always* appear on the first line
// make sure of this here
let (w, w_len) = match p_words_words.next() {
Some(winfo) => (winfo.word, winfo.word_nchars),
None => {
return ostream.write_all(b"\n");
}
};
// print the init, if it exists, and get its length
let p_init_len = w_len
+ if opts.crown || opts.tagged {
// handle "init" portion
ostream.write_all(para.init_str.as_bytes())?;
para.init_len
} else if !para.mail_header {
// for non-(crown, tagged) that's the same as a normal indent
ostream.write_all(p_indent.as_bytes())?;
p_indent_len
} else {
// except that mail headers get no indent at all
0
};
// write first word after writing init
ostream.write_all(w.as_bytes())?;
// does this paragraph require uniform spacing?
let uniform = para.mail_header || opts.uniform;
let mut break_args = BreakArgs {
opts,
init_len: p_init_len,
indent_str: p_indent,
indent_len: p_indent_len,
uniform,
ostream,
};
if opts.quick || para.mail_header {
break_simple(p_words_words, &mut break_args)
} else |
}
// break_simple implements a "greedy" breaking algorithm: print words until
// maxlength would be exceeded, then print a linebreak and indent and continue.
fn break_simple<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
iter.try_fold((args.init_len, false), |l, winfo| {
accum_words_simple(args, l, winfo)
})?;
args.ostream.write_all(b"\n")
}
fn accum_words_simple<'a>(
args: &mut BreakArgs<'a>,
(l, prev_punct): (usize, bool),
winfo: &'a WordInfo<'a>,
) -> std::io::Result<(usize, bool)> {
// compute the length of this word, considering how tabs will expand at this position on the line
let wlen = winfo.word_nchars + args.compute_width(winfo, l, false);
let slen = compute_slen(
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
if l + wlen + slen > args.opts.width {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
Ok((args.indent_len + winfo.word_nchars, winfo.ends_punct))
} else {
write_with_spaces(winfo.word, slen, args.ostream)?;
Ok((l + wlen + slen, winfo.ends_punct))
}
}
// break_knuth_plass implements an "optimal" breaking algorithm in the style of
// Knuth, D.E., and Plass, M.F. "Breaking Paragraphs into Lines." in Software,
// Practice and Experience. Vol. 11, No. 11, November 1981.
// http://onlinelibrary.wiley.com/doi/10.1002/spe.4380111102/pdf
fn break_knuth_plass<'a, T: Clone + Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
// run the algorithm to get the breakpoints
let breakpoints = find_kp_breakpoints(iter.clone(), args);
// iterate through the breakpoints (note that breakpoints is in reverse break order, so we .rev() it)
let result: std::io::Result<(bool, bool)> = breakpoints.iter().rev().try_fold(
(false, false),
|(mut prev_punct, mut fresh), &(next_break, break_before)| {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
// at each breakpoint, keep emitting words until we find the word matching this breakpoint
for winfo in &mut iter {
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
fresh = false;
prev_punct = winfo.ends_punct;
// We find identical breakpoints here by comparing addresses of the references.
// This is OK because the backing vector is not mutating once we are linebreaking.
let winfo_ptr = winfo as *const _;
let next_break_ptr = next_break as *const _;
if winfo_ptr == next_break_ptr {
// OK, we found the matching word
if break_before {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
} else {
// breaking after this word, so that means "fresh" is true for the next iteration
write_with_spaces(word, slen, args.ostream)?;
fresh = true;
}
break;
} else {
write_with_spaces(word, slen, args.ostream)?;
}
}
Ok((prev_punct, fresh))
},
);
let (mut prev_punct, mut fresh) = result?;
// after the last linebreak, write out the rest of the final line.
for winfo in iter {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
prev_punct = winfo.ends_punct;
fresh = false;
write_with_spaces(word, slen, args.ostream)?;
}
args.ostream.write_all(b"\n")
}
struct LineBreak<'a> {
prev: usize,
linebreak: Option<&'a WordInfo<'a>>,
break_before: bool,
demerits: i64,
prev_rat: f32,
length: usize,
fresh: bool,
}
#[allow(clippy::cognitive_complexity)]
fn find_kp_breakpoints<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
iter: T,
args: &BreakArgs<'a>,
) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut iter = iter.peekable();
// set up the initial null linebreak
let mut linebreaks = vec![LineBreak {
prev: 0,
linebreak: None,
break_before: false,
demerits: 0,
prev_rat: 0.0f32,
length: args.init_len,
fresh: false,
}];
// this vec holds the current active linebreaks; next_ holds the breaks that will be active for
// the next word
let active_breaks = &mut vec![0];
let next_active_breaks = &mut vec![];
let stretch = (args.opts.width - args.opts.goal) as isize;
let minlength = args.opts.goal - stretch as usize;
let mut new_linebreaks = vec![];
let mut is_sentence_start = false;
let mut least_demerits = 0;
loop {
let w = match iter.next() {
None => break,
Some(w) => w,
};
// if this is the last word, we don't add additional demerits for this break
let (is_last_word, is_sentence_end) = match iter.peek() {
None => (true, true),
Some(&&WordInfo {
sentence_start: st,
new_line: nl,
..
}) => (false, st || (nl && w.ends_punct)),
};
// should we be adding extra space at the beginning of the next sentence?
let slen = compute_slen(args.uniform, w.new_line, is_sentence_start, false);
let mut ld_new = i64::MAX;
let mut ld_next = i64::MAX;
let mut ld_idx = 0;
new_linebreaks.clear();
next_active_breaks.clear();
// go through each active break, extending it and possibly adding a new active
// break if we are above the minimum required length
#[allow(clippy::explicit_iter_loop)]
for &i in active_breaks.iter() {
let active = &mut linebreaks[i];
// normalize demerits to avoid overflow, and record if this is the least
active.demerits -= least_demerits;
if active.demerits < ld_next {
ld_next = active.demerits;
ld_idx = i;
}
// get the new length
let tlen = w.word_nchars
+ args.compute_width(w, active.length, active.fresh)
+ slen
+ active.length;
// if tlen is longer than args.opts.width, we drop this break from the active list
// otherwise, we extend the break, and possibly add a new break at this point
if tlen <= args.opts.width {
// this break will still be active next time
next_active_breaks.push(i);
// we can put this word on this line
active.fresh = false;
active.length = tlen;
// if we're above the minlength, we can also consider breaking here
if tlen >= minlength {
let (new_demerits, new_ratio) = if is_last_word {
// there is no penalty for the final line's length
(0, 0.0)
} else {
compute_demerits(
args.opts.goal as isize - tlen as isize,
stretch,
w.word_nchars as isize,
active.prev_rat,
)
};
// do not even consider adding a line that has too many demerits
// also, try to detect overflow by checking signum
let total_demerits = new_demerits + active.demerits;
if new_demerits < BAD_INFTY_SQ
&& total_demerits < ld_new
&& active.demerits.signum() <= new_demerits.signum()
{
ld_new = total_demerits;
new_linebreaks.push(LineBreak {
prev: i,
linebreak: Some(w),
break_before: false,
demerits: total_demerits,
prev_rat: new_ratio,
length: args.indent_len,
fresh: true,
});
}
}
}
}
// if we generated any new linebreaks, add the last one to the list
// the last one is always the best because we don't add to new_linebreaks unless
// it's better than the best one so far
match new_linebreaks.pop() {
None => (),
Some(lb) => {
next_active_breaks.push(linebreaks.len());
linebreaks.push(lb);
}
}
if next_active_breaks.is_empty() {
// every potential linebreak is too long! choose the linebreak with the least demerits, ld_idx
let new_break =
restart_active_breaks(args, &linebreaks[ld_idx], ld_idx, w, slen, minlength);
next_active_breaks.push(linebreaks.len());
linebreaks.push(new_break);
least_demerits = 0;
} else {
// next time around, normalize out the demerits fields
// on active linebreaks to make overflow less likely
least_demerits = cmp::max(ld_next, 0);
}
// swap in new list of active breaks
mem::swap(active_breaks, next_active_breaks);
// If this was the last word in a sentence, the next one must be the first in the next.
is_sentence_start = is_sentence_end;
}
// return the best path
build_best_path(&linebreaks, active_breaks)
}
fn build_best_path<'a>(paths: &[LineBreak<'a>], active: &[usize]) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut breakwords = vec![];
// of the active paths, we select the one with the fewest demerits
let mut best_idx = match active.iter().min_by_key(|&&a| paths[a].demerits) {
None => crash!(
1,
"Failed to find a k-p linebreak solution. This should never happen."
),
Some(&s) => s,
};
// now, chase the pointers back through the break list, recording
// the words at which we should break
loop {
let next_best = &paths[best_idx];
match next_best.linebreak {
None => return breakwords,
Some(prev) => {
breakwords.push((prev, next_best.break_before));
best_idx = next_best.prev;
}
}
}
}
// "infinite" badness is more like (1+BAD_INFTY)^2 because of how demerits are computed
const BAD_INFTY: i64 = 10_000_000;
const BAD_INFTY_SQ: i64 = BAD_INFTY * BAD_INFTY;
// badness = BAD_MULT * abs(r) ^ 3
const BAD_MULT: f32 = 100.0;
// DR_MULT is multiplier for delta-R between lines
const DR_MULT: f32 = 600.0;
// DL_MULT is penalty multiplier for short words at end of line
const DL_MULT: f32 = 300.0;
fn compute_demerits(delta_len: isize, stretch: isize, wlen: isize, prev_rat: f32) -> (i64, f32) {
// how much stretch are we using?
let ratio = if delta_len == 0 {
0.0f32
} else {
delta_len as f32 / stretch as f32
};
// compute badness given the stretch ratio
let bad_linelen = if ratio.abs() > 1.0f32 {
BAD_INFTY
} else {
(BAD_MULT * ratio.powi(3).abs()) as i64
};
// we penalize lines ending in really short words
let bad_wordlen = if wlen >= stretch {
0
} else {
(DL_MULT
* ((stretch - wlen) as f32 / (stretch - 1) as f32)
.powi(3)
.abs()) as i64
};
// we penalize lines that have very different ratios from previous lines
let bad_delta_r = (DR_MULT * (((ratio - prev_rat) / 2.0).powi(3)).abs()) as i64;
let demerits = i64::pow(1 + bad_linelen + bad_wordlen + bad_delta_r, 2);
(demerits, ratio)
}
fn restart_active_breaks<'a>(
args: &BreakArgs<'a>,
active: &LineBreak<'a>,
act_idx: usize,
w: &'a WordInfo<'a>,
slen: usize,
min: usize,
) -> LineBreak<'a> {
let (break_before, line_length) = if active.fresh {
// never break before a word if that word would be the first on a line
(false, args.indent_len)
} else {
// choose the lesser evil: breaking too early, or breaking too late
let wlen = w.word_nchars + args.compute_width(w, active.length, active.fresh);
let underlen = (min - active.length) as isize;
let overlen = ((wlen + slen + active.length) - args.opts.width) as isize;
if overlen > underlen {
// break early, put this word on the next line
(true, args.indent_len + w.word_nchars)
} else {
(false, args.indent_len)
}
};
// restart the linebreak. This will be our only active path.
LineBreak {
prev: act_idx,
linebreak: Some(w),
break_before,
demerits: 0, // this is the only active break, so we can reset the demerit count
prev_rat: if break_before { 1.0 } else { -1.0 },
length: line_length,
fresh: !break_before,
}
}
// Number of spaces to add before a word, based on mode, newline, sentence start.
fn compute_slen(uniform: bool, newline: bool, start: bool, punct: bool) -> usize {
if uniform || newline {
if start || (newline && punct) {
2
} else {
1
}
} else {
0
}
}
// If we're on a fresh line, slen=0 and we slice off leading whitespace.
// Otherwise, compute slen and leave whitespace alone.
fn slice_if_fresh(
fresh: bool,
word: &str,
start: usize,
uniform: bool,
newline: bool,
sstart: bool,
punct: bool,
) -> (usize, &str) {
if fresh {
(0, &word[start..])
} else {
(compute_slen(uniform, newline, sstart, punct), word)
}
}
// Write a newline and add the indent.
fn write_newline(indent: &str, ostream: &mut BufWriter<Stdout>) -> std::io::Result<()> {
ostream.write_all(b"\n")?;
ostream.write_all(indent.as_bytes())
}
// Write the word, along with slen spaces.
fn write_with_spaces(
word: &str,
slen: usize,
ostream: &mut BufWriter<Stdout>,
) -> std::io::Result<()> {
if slen == 2 {
ostream.write_all(b" ")?;
} else if slen == 1 {
ostream.write_all(b" ")?;
}
ostream.write_all(word.as_bytes())
}
| {
break_knuth_plass(p_words_words, &mut break_args)
} | conditional_block |
linebreak.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) INFTY MULT accum breakwords linebreak linebreaking linebreaks linelen maxlength minlength nchars ostream overlen parasplit plass posn powf punct signum slen sstart tabwidth tlen underlen winfo wlen wordlen
use std::cmp;
use std::i64;
use std::io::{BufWriter, Stdout, Write};
use std::mem;
use uucore::crash;
use crate::parasplit::{ParaWords, Paragraph, WordInfo};
use crate::FmtOptions;
struct BreakArgs<'a> {
opts: &'a FmtOptions,
init_len: usize,
indent_str: &'a str,
indent_len: usize,
uniform: bool,
ostream: &'a mut BufWriter<Stdout>,
}
impl<'a> BreakArgs<'a> {
fn compute_width(&self, winfo: &WordInfo, posn: usize, fresh: bool) -> usize {
if fresh {
0
} else {
let post = winfo.after_tab;
match winfo.before_tab {
None => post,
Some(pre) => {
post + ((pre + posn) / self.opts.tabwidth + 1) * self.opts.tabwidth - posn
}
}
}
}
}
pub fn break_lines(
para: &Paragraph,
opts: &FmtOptions,
ostream: &mut BufWriter<Stdout>,
) -> std::io::Result<()> {
// indent
let p_indent = ¶.indent_str[..];
let p_indent_len = para.indent_len;
// words
let p_words = ParaWords::new(opts, para);
let mut p_words_words = p_words.words();
// the first word will *always* appear on the first line
// make sure of this here
let (w, w_len) = match p_words_words.next() {
Some(winfo) => (winfo.word, winfo.word_nchars),
None => {
return ostream.write_all(b"\n");
}
};
// print the init, if it exists, and get its length
let p_init_len = w_len
+ if opts.crown || opts.tagged {
// handle "init" portion
ostream.write_all(para.init_str.as_bytes())?;
para.init_len
} else if !para.mail_header {
// for non-(crown, tagged) that's the same as a normal indent
ostream.write_all(p_indent.as_bytes())?;
p_indent_len
} else {
// except that mail headers get no indent at all
0
};
// write first word after writing init
ostream.write_all(w.as_bytes())?;
// does this paragraph require uniform spacing?
let uniform = para.mail_header || opts.uniform;
let mut break_args = BreakArgs {
opts,
init_len: p_init_len,
indent_str: p_indent,
indent_len: p_indent_len,
uniform,
ostream,
};
if opts.quick || para.mail_header {
break_simple(p_words_words, &mut break_args)
} else {
break_knuth_plass(p_words_words, &mut break_args)
}
}
// break_simple implements a "greedy" breaking algorithm: print words until
// maxlength would be exceeded, then print a linebreak and indent and continue.
fn break_simple<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
iter.try_fold((args.init_len, false), |l, winfo| {
accum_words_simple(args, l, winfo)
})?;
args.ostream.write_all(b"\n")
}
fn accum_words_simple<'a>(
args: &mut BreakArgs<'a>,
(l, prev_punct): (usize, bool),
winfo: &'a WordInfo<'a>,
) -> std::io::Result<(usize, bool)> {
// compute the length of this word, considering how tabs will expand at this position on the line
let wlen = winfo.word_nchars + args.compute_width(winfo, l, false);
let slen = compute_slen(
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
if l + wlen + slen > args.opts.width {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
Ok((args.indent_len + winfo.word_nchars, winfo.ends_punct))
} else {
write_with_spaces(winfo.word, slen, args.ostream)?;
Ok((l + wlen + slen, winfo.ends_punct))
}
}
// break_knuth_plass implements an "optimal" breaking algorithm in the style of
// Knuth, D.E., and Plass, M.F. "Breaking Paragraphs into Lines." in Software,
// Practice and Experience. Vol. 11, No. 11, November 1981.
// http://onlinelibrary.wiley.com/doi/10.1002/spe.4380111102/pdf
fn break_knuth_plass<'a, T: Clone + Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
// run the algorithm to get the breakpoints
let breakpoints = find_kp_breakpoints(iter.clone(), args);
// iterate through the breakpoints (note that breakpoints is in reverse break order, so we .rev() it)
let result: std::io::Result<(bool, bool)> = breakpoints.iter().rev().try_fold(
(false, false),
|(mut prev_punct, mut fresh), &(next_break, break_before)| {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
// at each breakpoint, keep emitting words until we find the word matching this breakpoint
for winfo in &mut iter {
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
fresh = false;
prev_punct = winfo.ends_punct;
// We find identical breakpoints here by comparing addresses of the references.
// This is OK because the backing vector is not mutating once we are linebreaking.
let winfo_ptr = winfo as *const _;
let next_break_ptr = next_break as *const _;
if winfo_ptr == next_break_ptr {
// OK, we found the matching word
if break_before {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
} else {
// breaking after this word, so that means "fresh" is true for the next iteration
write_with_spaces(word, slen, args.ostream)?;
fresh = true;
}
break;
} else {
write_with_spaces(word, slen, args.ostream)?;
}
}
Ok((prev_punct, fresh))
},
);
let (mut prev_punct, mut fresh) = result?;
// after the last linebreak, write out the rest of the final line.
for winfo in iter {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
prev_punct = winfo.ends_punct;
fresh = false;
write_with_spaces(word, slen, args.ostream)?;
}
args.ostream.write_all(b"\n")
}
struct LineBreak<'a> {
prev: usize,
linebreak: Option<&'a WordInfo<'a>>,
break_before: bool,
demerits: i64,
prev_rat: f32,
length: usize,
fresh: bool,
}
#[allow(clippy::cognitive_complexity)]
fn find_kp_breakpoints<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
iter: T,
args: &BreakArgs<'a>,
) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut iter = iter.peekable();
// set up the initial null linebreak
let mut linebreaks = vec![LineBreak {
prev: 0,
linebreak: None,
break_before: false,
demerits: 0,
prev_rat: 0.0f32,
length: args.init_len,
fresh: false,
}];
// this vec holds the current active linebreaks; next_ holds the breaks that will be active for
// the next word
let active_breaks = &mut vec![0];
let next_active_breaks = &mut vec![];
let stretch = (args.opts.width - args.opts.goal) as isize;
let minlength = args.opts.goal - stretch as usize;
let mut new_linebreaks = vec![];
let mut is_sentence_start = false;
let mut least_demerits = 0;
loop {
let w = match iter.next() {
None => break,
Some(w) => w,
};
// if this is the last word, we don't add additional demerits for this break
let (is_last_word, is_sentence_end) = match iter.peek() {
None => (true, true),
Some(&&WordInfo {
sentence_start: st,
new_line: nl,
..
}) => (false, st || (nl && w.ends_punct)),
};
// should we be adding extra space at the beginning of the next sentence?
let slen = compute_slen(args.uniform, w.new_line, is_sentence_start, false);
let mut ld_new = i64::MAX;
let mut ld_next = i64::MAX;
let mut ld_idx = 0;
new_linebreaks.clear();
next_active_breaks.clear();
// go through each active break, extending it and possibly adding a new active
// break if we are above the minimum required length
#[allow(clippy::explicit_iter_loop)]
for &i in active_breaks.iter() {
let active = &mut linebreaks[i];
// normalize demerits to avoid overflow, and record if this is the least
active.demerits -= least_demerits;
if active.demerits < ld_next {
ld_next = active.demerits;
ld_idx = i;
}
// get the new length
let tlen = w.word_nchars
+ args.compute_width(w, active.length, active.fresh)
+ slen
+ active.length;
// if tlen is longer than args.opts.width, we drop this break from the active list
// otherwise, we extend the break, and possibly add a new break at this point
if tlen <= args.opts.width {
// this break will still be active next time
next_active_breaks.push(i);
// we can put this word on this line
active.fresh = false;
active.length = tlen;
// if we're above the minlength, we can also consider breaking here
if tlen >= minlength {
let (new_demerits, new_ratio) = if is_last_word {
// there is no penalty for the final line's length
(0, 0.0)
} else {
compute_demerits(
args.opts.goal as isize - tlen as isize,
stretch,
w.word_nchars as isize,
active.prev_rat,
)
};
// do not even consider adding a line that has too many demerits
// also, try to detect overflow by checking signum
let total_demerits = new_demerits + active.demerits;
if new_demerits < BAD_INFTY_SQ
&& total_demerits < ld_new
&& active.demerits.signum() <= new_demerits.signum()
{
ld_new = total_demerits;
new_linebreaks.push(LineBreak {
prev: i,
linebreak: Some(w),
break_before: false,
demerits: total_demerits,
prev_rat: new_ratio,
length: args.indent_len,
fresh: true,
});
}
}
}
}
// if we generated any new linebreaks, add the last one to the list
// the last one is always the best because we don't add to new_linebreaks unless
// it's better than the best one so far
match new_linebreaks.pop() {
None => (),
Some(lb) => {
next_active_breaks.push(linebreaks.len());
linebreaks.push(lb);
}
}
if next_active_breaks.is_empty() {
// every potential linebreak is too long! choose the linebreak with the least demerits, ld_idx
let new_break =
restart_active_breaks(args, &linebreaks[ld_idx], ld_idx, w, slen, minlength);
next_active_breaks.push(linebreaks.len());
linebreaks.push(new_break);
least_demerits = 0;
} else {
// next time around, normalize out the demerits fields
// on active linebreaks to make overflow less likely
least_demerits = cmp::max(ld_next, 0);
}
// swap in new list of active breaks
mem::swap(active_breaks, next_active_breaks);
// If this was the last word in a sentence, the next one must be the first in the next.
is_sentence_start = is_sentence_end;
}
// return the best path
build_best_path(&linebreaks, active_breaks)
}
fn build_best_path<'a>(paths: &[LineBreak<'a>], active: &[usize]) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut breakwords = vec![];
// of the active paths, we select the one with the fewest demerits
let mut best_idx = match active.iter().min_by_key(|&&a| paths[a].demerits) {
None => crash!(
1,
"Failed to find a k-p linebreak solution. This should never happen."
),
Some(&s) => s,
};
// now, chase the pointers back through the break list, recording
// the words at which we should break
loop {
let next_best = &paths[best_idx];
match next_best.linebreak {
None => return breakwords,
Some(prev) => {
breakwords.push((prev, next_best.break_before));
best_idx = next_best.prev;
}
}
}
}
// "infinite" badness is more like (1+BAD_INFTY)^2 because of how demerits are computed
const BAD_INFTY: i64 = 10_000_000;
const BAD_INFTY_SQ: i64 = BAD_INFTY * BAD_INFTY;
// badness = BAD_MULT * abs(r) ^ 3
const BAD_MULT: f32 = 100.0;
// DR_MULT is multiplier for delta-R between lines
const DR_MULT: f32 = 600.0;
// DL_MULT is penalty multiplier for short words at end of line
const DL_MULT: f32 = 300.0;
fn compute_demerits(delta_len: isize, stretch: isize, wlen: isize, prev_rat: f32) -> (i64, f32) {
// how much stretch are we using?
let ratio = if delta_len == 0 {
0.0f32
} else {
delta_len as f32 / stretch as f32
};
// compute badness given the stretch ratio
let bad_linelen = if ratio.abs() > 1.0f32 {
BAD_INFTY
} else {
(BAD_MULT * ratio.powi(3).abs()) as i64
};
// we penalize lines ending in really short words
let bad_wordlen = if wlen >= stretch {
0
} else {
(DL_MULT
* ((stretch - wlen) as f32 / (stretch - 1) as f32)
.powi(3)
.abs()) as i64
};
// we penalize lines that have very different ratios from previous lines
let bad_delta_r = (DR_MULT * (((ratio - prev_rat) / 2.0).powi(3)).abs()) as i64;
let demerits = i64::pow(1 + bad_linelen + bad_wordlen + bad_delta_r, 2);
(demerits, ratio)
}
fn restart_active_breaks<'a>(
args: &BreakArgs<'a>,
active: &LineBreak<'a>,
act_idx: usize,
w: &'a WordInfo<'a>,
slen: usize,
min: usize,
) -> LineBreak<'a> {
let (break_before, line_length) = if active.fresh {
// never break before a word if that word would be the first on a line
(false, args.indent_len)
} else {
// choose the lesser evil: breaking too early, or breaking too late
let wlen = w.word_nchars + args.compute_width(w, active.length, active.fresh);
let underlen = (min - active.length) as isize;
let overlen = ((wlen + slen + active.length) - args.opts.width) as isize;
if overlen > underlen {
// break early, put this word on the next line
(true, args.indent_len + w.word_nchars)
} else {
(false, args.indent_len)
}
};
// restart the linebreak. This will be our only active path.
LineBreak {
prev: act_idx,
linebreak: Some(w),
break_before,
demerits: 0, // this is the only active break, so we can reset the demerit count
prev_rat: if break_before { 1.0 } else { -1.0 },
length: line_length,
fresh: !break_before,
}
}
// Number of spaces to add before a word, based on mode, newline, sentence start.
fn compute_slen(uniform: bool, newline: bool, start: bool, punct: bool) -> usize { | 1
}
} else {
0
}
}
// If we're on a fresh line, slen=0 and we slice off leading whitespace.
// Otherwise, compute slen and leave whitespace alone.
fn slice_if_fresh(
fresh: bool,
word: &str,
start: usize,
uniform: bool,
newline: bool,
sstart: bool,
punct: bool,
) -> (usize, &str) {
if fresh {
(0, &word[start..])
} else {
(compute_slen(uniform, newline, sstart, punct), word)
}
}
// Write a newline and add the indent.
fn write_newline(indent: &str, ostream: &mut BufWriter<Stdout>) -> std::io::Result<()> {
ostream.write_all(b"\n")?;
ostream.write_all(indent.as_bytes())
}
// Write the word, along with slen spaces.
fn write_with_spaces(
word: &str,
slen: usize,
ostream: &mut BufWriter<Stdout>,
) -> std::io::Result<()> {
if slen == 2 {
ostream.write_all(b" ")?;
} else if slen == 1 {
ostream.write_all(b" ")?;
}
ostream.write_all(word.as_bytes())
} | if uniform || newline {
if start || (newline && punct) {
2
} else { | random_line_split |
Helper.js | /*
Also Jlow wants to implement a way to change the workout scorer and generator both if user chooses his own custom rest time
Commitment per week - if it changes halfway through --> skip workout --> if workout skipped or if current date doesn't match the date the next workout was supposed to happen calculated from date when last workout was done, change currentFitness
Fix the logic to suggest the second workout - Fix the way we implement workout score, workout difficulty, current fitness and target fitness. Figure out how to update current fitness, whether we are using the proper target fitness calculated from deltaDiff + workoutScore or deltaDiff + workout_difficulty to suggest the next workout, do we need to save current or target fitness
Jlow said high standard deviation (uneven set times) should result in a higher workout score. See results of beta test
Fitness degradation curve
edge cases - 20 min current vs 10 target --> suggest easier workout in the sense that he does smaller distances --> add easier workouts <-- also set a hardcoded ceiling dlifficultyMultiplier that warns user that the workout is too hard, do the workout at their own risk <-- see long-ass WA discussion
cycles of 3
convert Jlow's fillerworkout notes to unit tests
Some no-gos
1. The average time is more than 5% off the goal pace but the workout score is more than 95
2. The workout score exceeds 120(unless the values are ridiculous)
3. The training times/variance are not a realistic number(like 90s SD for 500m trainings)
Improve on overall fitness calculator
suggest pyramid workout
implement something like a linked list for filler workouts instead of the stupid 'end' field and having multiple objects
need to save goal and tempo pace as well
Avoid suggesting a workout that is too close to the IPPT date
Migrate database from weeks to timestamp for existing users
Original interval workout code suggester to use timestamp
Workout Frequency is 0 < x <= 3
Save the date of the first interval workout, change number of weeks to specific date
Basically we also need to regenerate the suggested workout everytime the user logs in, since the current date will change
Upon previous filler workout success
In fartlek workouts, rename goalPace to targetPace
*/
import {scoredWorkouts} from "./workoutScorer.js";
// All constants below TBC
const rho = 7.0;
// tePace, ltPace, vPace, stPace
const phi = [1, 1, 1, 1];
const paceConstants = [1.243, 1.19, 1.057, 0.89];
export const deltas = [0.41, 0.49, 0.55, 0.65, 0.73];
const getPace = (time) => time / 2400;
const convertSecToHour = (timeInSec) => timeInSec / (60 * 60);
export const getTargetPaces = (targetTime) => {
const targetPace = getPace(targetTime);
const displayPace = Math.floor(targetPace * 100 * 1000);
return {targetPace, displayPace}
}
export const convertToVelocity = (currentTime) => 2.4 / convertSecToHour(currentTime);
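// Worked numbers for the helpers above: a 12:00 (720 s) 2.4 km time gives
// getPace(720) = 720 / 2400 = 0.3 s/m, displayPace = Math.floor(0.3 * 100 * 1000) = 30000
// (30 s per 100 m expressed in ms), and convertToVelocity(720) = 2.4 / 0.2 = 12 km/h.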
export const getPrescribedRest = (restMultiple, targetPace) => Math.round((restMultiple * targetPace * 100) / 5) * 5;
const restRatio = (restMultiple, targetPace) =>
getPrescribedRest(restMultiple, targetPace) / (restMultiple * targetPace * 100);
const restMultiplier = (workout, targetPace) => 1 / Math.exp(0.0024 * restRatio(workout.parts[0]["restMultiplier"], targetPace));
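// Worked numbers for the rest helpers above: with a rest multiple of 2 and a target
// pace of 0.3 s/m, getPrescribedRest(2, 0.3) = Math.round(60 / 5) * 5 = 60 s of rest,
// so restRatio = 60 / 60 = 1 and the overall rest multiplier ≈ 1 / Math.exp(0.0024) ≈ 0.998.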
const convertToSeconds = (input) => {
return input.split(":").reduce((acc, ele, i) => {
if (i === 0) {
return acc + Number(ele) * 60; // minutes
} else return acc + Number(ele); // seconds
}, 0);
}
const sanitiseWeekDateStamp = (timestamp) => timestamp - (timestamp % 86400000) // truncate a ms timestamp to the start of its UTC day
export const getPaces = (targetPace, cNewbieGains) =>
phi.map((phiValue, i) => targetPace * paceConstants[i] * cNewbieGains * phiValue)
export const getVelocities = (paces) =>
paces.map((pace) => (1 / pace) * 3.6);
const intermediateFunc = (delta, velocityOne, velocityTwo) =>
delta * velocityOne * Math.exp(velocityTwo - velocityOne);
export const getOverallFitness = (speedDifficulty, duration, currentFitness, previousWorkout) => {
const deltaDifficulty = speedDifficulty - 100;
const deltaDifficultyPerWeek = deltaDifficulty / duration;
if (Object.keys(previousWorkout).length < 1) return {newFitness: 100, targetDifficulty: 100 + deltaDifficultyPerWeek};
const previousWorkoutScore = scoredWorkouts(previousWorkout);
if (previousWorkoutScore.workoutScore < 94) {
return {newFitness: currentFitness, targetDifficulty: currentFitness + deltaDifficultyPerWeek};
}
return {
newFitness: previousWorkout.personalisedDifficultyMultiplier,
targetDifficulty: previousWorkout.personalisedDifficultyMultiplier + deltaDifficultyPerWeek,
};
/* To update the code above with the following decision logic (a sketch of step 1 follows after this function):
1. Decision logic to determine if workout is a success or failure
a. If workout score is between 94-105, workout=success
b. If workout score < 94, workout=fail
c. If workout score > 105, workout= breakthrough
2. If workout turns out to be a failure or success
a. if previous workout is success, continue with next workout, change nothing
b. if workout is a failure and fail_count == 0,
i. Set k = 1.2, fail_count++
c. if workout is a failure and fail_count == 1,
i. Set x = P(avg)
d. if workout is a breakthrough and breakthrough_count == 0,
i. breakthrough count++
e. if workout is a breakthrough and breakthrough_count == 1,
i. Set x = P(avg)
*/
};
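// Hypothetical sketch (not part of the original file): the success/fail/breakthrough
// classification described in the comment at the end of getOverallFitness, using its
// 94 and 105 cut-offs. How fail_count and breakthrough_count then adjust k and x is
// left open, as in the notes.
const classifyWorkout = (workoutScore) => {
    if (workoutScore < 94) return "fail";
    if (workoutScore <= 105) return "success";
    return "breakthrough";
};
// e.g. classifyWorkout(scoredWorkouts(previousWorkout).workoutScore) === "success"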
// The first time we call getDiffs we fall back to 100; for the second stage we use the previously calculated diff
const checkDiff = (diffs, diff) => {
if (diffs[diff]) {
return diffs[diff];
}
return 100;
};
const getDiffs = (velocityToCompare, velocities, intermediateFunc, x = 1, differences = {}) => {
let diffs = {};
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
if (velocityToCompare < teVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") +
x * (deltas[0] * teVelocity * Math.exp(teVelocity - velocityToCompare));
} else if (velocityToCompare < ltVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") -
x * intermediateFunc(deltas[1], teVelocity, velocityToCompare);
} else if (velocityToCompare < vVelocity) {
diffs.ltDiff =
checkDiff(differences, "ltDiff") -
x * intermediateFunc(deltas[2], ltVelocity, velocityToCompare);
} else if (velocityToCompare < stVelocity) {
diffs.vDiff =
checkDiff(differences, "vDiff") -
x * intermediateFunc(deltas[3], vVelocity, velocityToCompare);
// console.log(checkDiff(differences, 'vDiff'))
} else {
diffs.stDiff =
checkDiff(differences, "stDiff") -
x * intermediateFunc(deltas[4], stVelocity, velocityToCompare);
}
return diffs;
};
export const calculateDifficulties = (velocities, currentVelocity) => {
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
const diffs = getDiffs(currentVelocity, velocities, intermediateFunc);
while (Object.keys(diffs).length < 4) {
if (diffs.teDiff && !diffs.ltDiff) {
diffs.ltDiff = diffs.teDiff + intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (diffs.ltDiff && !(diffs.teDiff && diffs.vDiff)) {
if (!diffs.teDiff) {
diffs.teDiff = diffs.ltDiff - intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (!diffs.vDiff) {
diffs.vDiff = diffs.ltDiff + intermediateFunc(deltas[2], ltVelocity, vVelocity);
}
}
if (diffs.vDiff && !(diffs.ltDiff && diffs.stDiff)) {
if (!diffs.ltDiff) {
diffs.ltDiff = diffs.vDiff - intermediateFunc(deltas[2], ltVelocity, vVelocity);
}
if (!diffs.stDiff) {
diffs.stDiff = diffs.vDiff + intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
if (diffs.stDiff && !diffs.vDiff) {
diffs.vDiff = diffs.stDiff - intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
return diffs;
}
export const getSpeedDifficulty = (currentVelocity, targetVelocity, velocities) => {
//todo why so many diffs floating around? get rid of them
const diffs = calculateDifficulties(velocities, currentVelocity);
const finalDiffs = getDiffs(targetVelocity, velocities, intermediateFunc, -1, diffs);
if (Object.values(finalDiffs).length === 1) {
return Object.values(finalDiffs)[0];
}
return 0;
};
export const generateConstants = (questionnaireData) => {
//todo verify personalbests below
const beta = questionnaireData.regular ? 1 : 0.975;
const alpha = Math.max(
0,
Math.min(
1,
(1 / 3) *
beta *
((questionnaireData.frequency * questionnaireData.distance) / 30 +
questionnaireData.experience / 36 +
questionnaireData.frequency / 3)
)
);
/* old code
Math.min(
1,
(1 / 3) *
beta *
((answers.fFrequency * answers.dDistance) / 30 +
answers.lMonths / 36 +
answers.fFrequency / 3)
)
);
*/
const cNewbieGains = (1 / rho) * Math.exp(1 - alpha) + (rho - 1) / rho;
return {alpha, beta, cNewbieGains};
};
// todo edit this again
const getBestTrainingPlan = (trainingPlanPrimary, trainingPlanSecondary) =>
trainingPlanPrimary[0] > trainingPlanSecondary[0] /*&&
trainingPlanPrimary[0] - trainingPlanSecondary[0] < 3 &&
trainingPlanPrimary[1]["personalisedDifficultyMultiplier"] <
trainingPlanSecondary[1]["personalisedDifficultyMultiplier"];*/
export function getUserInfo(questionnaireData, previousFitness) {
const {duration, workoutFrequency} = questionnaireData
//todo fix currentFitness
return {
currentTime: convertToSeconds(questionnaireData.latest),
targetTime: convertToSeconds(questionnaireData.target),
duration,
workoutFrequency,
currentFitness: previousFitness,
};
}
export const generateTrainingPlans = (speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout) => {
const {newFitness, targetDifficulty} = getOverallFitness(
speedDifficulty,
userInfo.duration,
userInfo.currentFitness,
previousWorkout,
);
const getPersonalisedDifficulty = (workout) => {
const temp = JSON.parse(JSON.stringify(workout));
temp.personalisedDifficultyMultiplier =
(speedDifficulty / 100) * workout.difficultyMultiplier * restMultiplier(workout, targetPace); // * 100
return temp;
};
const reducer = (variance, workout) => {
const workoutVariance = Math.abs(workout.personalisedDifficultyMultiplier - targetDifficulty);
if (workoutVariance > variance[0]) {
return variance;
}
return [workoutVariance, workout]; //return [workoutVariance, ...workout];
};
const primaryIntervalsCopy = primary.map(getPersonalisedDifficulty);
const secondaryIntervalsCopy = secondary.map(getPersonalisedDifficulty);
const trainingPlanPrimary = primaryIntervalsCopy.reduce(reducer, [10000]);
const trainingPlanSecondary = secondaryIntervalsCopy.reduce(reducer, [trainingPlanPrimary[1]]);
return {trainingPlanPrimary, trainingPlanSecondary, newFitness};
}
const getIntervalTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace) => {
const velocities = getVelocities(getPaces(targetPace, cNewbieGains));
// velocities in km/hr, paces in s/m
const speedDifficulty = getSpeedDifficulty(convertToVelocity(userInfo.currentTime), convertToVelocity(userInfo.targetTime), velocities); // getSpeedDifficulty(currentVelocity, paces);
const {
trainingPlanPrimary,
trainingPlanSecondary,
newFitness
} = generateTrainingPlans(speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout);
// console.log(JSON.stringify(trainingPlanPrimary), JSON.stringify(trainingPlanSecondary))
let trainingPlan = getBestTrainingPlan(trainingPlanPrimary, trainingPlanSecondary)
? trainingPlanSecondary[1]
: trainingPlanPrimary[1];
trainingPlan.parts[0]["rest"] = getPrescribedRest(trainingPlan.parts[0]["restMultiplier"], targetPace);
trainingPlan.parts[0]["pace"] = displayPace
return {newFitness, trainingPlan};
}
const readJSON = async (name) => {
const response = await fetch("./" + name + ".json");
return response.json();
}
const getRoundedDistance = (time, tempoPace) => Math.ceil((time * 60 / tempoPace) / 0.5) * 0.5; // time in min, tempoPace in s per km here -> distance rounded up to the nearest 0.5 km
const getFartlekWorkout = (fillerWorkout, tempoPace, targetPace) => {
let jogTime, jogDistance, jogPace
const {sprintDistance} = fillerWorkout.parts[0]
const jogPaceFunction = (jogPaceString) => new Function('tempoPace', jogPaceString)
/* The Python version had a better implementation of jogPaceFunction: rather than storing a string of code in the database, it builds an array like ['tempoPace', '0.5'], where 0.5 is added to tempoPace (see the sketch after this function):
def get_sprint_pace(sprintPace):
y = 0
for x in sprintPace:
if x == 'goalPace':
y += goalPace
else:
y += int(x)
return y
*/
if (fillerWorkout.parts[0].jogByTime) {
jogTime = fillerWorkout.parts[0].jogByTime
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogDistance = jogTime / jogPace
} else if (fillerWorkout.parts[0].jogByDistance) {
jogDistance = fillerWorkout.parts[0].jogByDistance
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogTime = jogDistance * jogPace
}
const sprintPaceFunction = (sprintPaceString) => new Function('targetPace', sprintPaceString) //same as jogPaceFunction
const sprintPace = sprintPaceFunction(fillerWorkout.parts[0].sprintPace)(targetPace)
return { //pace in s/m, distance in m, time in s
sprintDistance,
jogTime,
jogPace,
sprintPace,
jogDistance
}
}
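// Hypothetical sketch (not part of the original file): an array-based alternative to the
// string-evaluating jogPaceFunction/sprintPaceFunction above, mirroring the Python
// get_sprint_pace idea quoted inside getFartlekWorkout. Each element of the spec is either
// the name of a known pace ('tempoPace', 'targetPace') or a numeric offset to add to it.
const resolvePace = (paceSpec, knownPaces) =>
    paceSpec.reduce((total, term) => {
        if (typeof term === "number") return total + term;
        if (term in knownPaces) return total + knownPaces[term];
        return total + Number(term); // numeric strings such as '0.5'
    }, 0);
// e.g. resolvePace(['tempoPace', '0.5'], {tempoPace: 0.25}) === 0.75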
const getFartlekTrainingPlan = async (alpha, weekNumber, tempoPace, targetPace) => {
// const fartlek = require('./fartlek.json')
const fartlek = await readJSON('fartlek')
for (let i = 0; i < fartlek.length; i++) {
const fillerWorkout = fartlek[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets: fillerWorkout.parts[0].sprintSets}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
if (alpha < 0.8) {
const sprintSets = fillerWorkout.parts[0].sprintSets + weekNumber - fillerWorkout.parts[0].weekAt
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets}
}
const fartlekWorkout = getFartlekWorkout(fillerWorkout, tempoPace, targetPace)
const newSprintPace = fartlekWorkout.sprintPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 0.00250
return {...fartlekWorkout, sprintPace: newSprintPace, sprintSets: fillerWorkout.parts[0].sprintSets}
}
}
}
}
const getLongDistanceTrainingPlan = async (alpha, weekNumber, tempoPace) => {
// const longDistance = require('./longDistance.json')
const longDistance = await readJSON('longDistance')
for (let i = 0; i < longDistance.length; i++) {
const fillerWorkout = longDistance[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
const convertedTempoPace = tempoPace * 1000
return { //runTime in min, tempoPace in s/m, distance in km
runTime: fillerWorkout.parts[0].runTime,
tempoPace,
distance: getRoundedDistance(fillerWorkout.parts[0].runTime, convertedTempoPace)
}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
const convertedTempoPace = tempoPace * 1000
const tempoPaceNew = convertedTempoPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 3
const runTime = fillerWorkout.parts[0].runTime
const distance = getRoundedDistance(runTime, tempoPaceNew)
return {distance, runTime, tempoPace: tempoPaceNew / 1000}
}
}
}
}
const getWeeksAndStartDate = (firstWorkoutTimestamp, currentDatestamp) => {
let numberOfWeeksElapsed = 0
let weekStartDatestamp = firstWorkoutTimestamp
while (weekStartDatestamp < currentDatestamp) {
numberOfWeeksElapsed++
weekStartDatestamp += 604800000 // advance one week (in ms) per iteration
}
return {numberOfWeeksElapsed, weekStartDatestamp}
}
const getNextDate = (dateToCompare, previousWorkoutDate) => {
if ((dateToCompare - sanitiseWeekDateStamp(previousWorkoutDate)) < 86400000) return dateToCompare + 86400000
return dateToCompare
}
//todo this function is using test values
const getSuggestedDate = (userInfo, previousWorkout) => {
const sanitisedCurrentDatestamp = sanitiseWeekDateStamp(Date.now())
const {ipptDatestamp} = userInfo
//if close to IPPT date
if ((sanitiseWeekDateStamp(ipptDatestamp) - sanitisedCurrentDatestamp) < (86400000 * 2)) return null
if (!!(previousWorkout.workout_ID && previousWorkout.workout_ID.match(/^4/))) { // match() returns null when the ID does not start with 4, so do not index into it
const firstWorkoutTimestamp = parseInt('1622542227000')
const currentDatestamp = Date.now()
let {numberOfWeeksElapsed} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
return getNextDate(nextWeekStart, previousWorkout.date)
}
return getNextDate(sanitisedCurrentDatestamp, previousWorkout.date)
// return getNextDate(sanitisedCurrentDatestamp, Date.now())
}
const getOneOfThreeTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek) => {
const firstWorkoutTimestamp = parseInt('1622542227000')
const {workoutFrequency, ipptDatestamp} = userInfo
const currentDatestamp = Date.now()
userInfo.duration = 8 // todo: Math.floor(ipptDatestamp - currentDatestamp)
const previousWorkoutDatestamp = previousWorkout ? previousWorkout.date : ''
let {numberOfWeeksElapsed, weekStartDatestamp} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
weekStartDatestamp = sanitiseWeekDateStamp(weekStartDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
const tempoPace = getPaces(targetPace, cNewbieGains)[0]
const isPreviousWorkoutIntervalWorkout = !!(previousWorkout.workout_ID && previousWorkout.workout_ID.match(/^[123]/)) // match() returns null for non-interval IDs, so do not index into it
if ((ipptDatestamp - currentDatestamp) < 604800000) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
if (workoutFrequency === 1 || !(Object.keys(previousWorkout).length > 0)) return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace);
if (workoutFrequency === 2) {
if (isPreviousWorkoutIntervalWorkout && previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
}
if (workoutFrequency === 3) {
if (previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
}
return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace)
}
export const getTrainingPlan = (questionnaireData, workouts, previousWorkout = {}, previousFitness = 100) => {
const [primary, secondary, pyramid, longDistance, fartlek] = workouts
if (questionnaireData.regular) {
//TBC logic
}
const userInfo = getUserInfo(questionnaireData, previousFitness);
const {alpha, beta, cNewbieGains} = generateConstants(questionnaireData);
const {targetPace, displayPace} = getTargetPaces(userInfo.targetTime);
const suggestedDate = getSuggestedDate(userInfo, previousWorkout)
const {
newFitness,
trainingPlan
} = getOneOfThreeTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek);
return {newFitness, trainingPlan, suggestedDate};
};
export async function getJSON(url) |
const testObj = {
permSetCount: "9",
permDistance: "300",
timings: ["63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75"], //todo for now code below assumes these set times are in seconds
permPaceTime: "21250", //in ms
};
const myResults = {
difficultyMultiplier: 69,
personalisedDifficultyMultiplier: 119,
segment: "secondary",
type: "Distance Interval",
workout_ID: "2003",
parts: [
{
part_ID: "2003_0",
distance: 400,
pace: 25000,
rest: 60,
restMultiplier: 2.33,
sets: 7,
timings: [125000, 110000, 120000, 108000, 102000, 123333],
},
],
};
// const testObj = {
// userProfile: {
// displayName: "Elon Musk",
// email: "[email protected]",
// bio: "I like running!",
// dp: "https://techcrunch.com/wp-content/uploads/2021/01/GettyImages-1229901940.jpg?w=730&crop=1",
// badges: ["Top Runner", "5K Finisher", "IPPT Gold"],
// },
// workout: {
// date: Date.now(),
// difficultyMultiplier: 91,
// review: {
// feeling: "good",
// reflection: "This was one of the toughest training I have faced in a long time.",
// },
// parts: [
// {
// distance: 300,
// pace: 25000,
// part_ID: "1006_0",
// rest: 115,
// restMultiplier: 4.5,
// sets: 8,
// timings: [82000, 207000, 83000, 79430, 78236],
// },
// ],
// personalisedDifficultyMultiplier: 160.0173922309409,
// segment: "primary",
// type: "Distance Interval",
// workout_ID: "1006",
// feeling: "good",
// },
// comments: {
// adfalksdfh: {
// time: Date.now(),
// text: "Daddy Musk is the best!",
// userProfile: {
// displayName: "X Æ A-12",
// email: "[email protected]",
// bio: "I am human!!! MUHAHAHA",
// badges: ["Baby Musk", "Best Name", "5m Finisher"],
// dp: "https://static.scientificamerican.com/blogs/cache/file/7069F0BB-A9AB-4932-84F508BBC0136458_source.jpg?w=590&h=800&F754D658-CE37-41EE-BE2C0EFC7134D582",
// },
// },
// },
// }; | {
try {
const raw = await fetch(url);
return await raw.json();
} catch (error) {
throw error;
}
} | identifier_body |
Helper.js | /*
Also Jlow wants to implement a way to change the workout scorer and generator both if user chooses his own custom rest time
Commitment per week - if it changes halfway through --> skip workout --> if workout skipped or if current date doesn't match the date the next workout was supposed to happen calculated from date when last workout was done, change currentFitness
Fix the logic to suggest the second workout - Fix the way we implement workout score, workout difficulty, current fitness and target fitness. Figure out how to update current fitness, whether we are using the proper target fitness calculated from deltaDiff + workoutScore or deltaDiff + workout_difficulty to suggest the next workout, do we need to save current or target fitness
Jlow said high standard deviation (uneven set times) should result in a higher workout score. See results of beta test
Fitness degradation curve
edge cases - 20 min current vs 10 target --> suggest easier workout in the sense that he does smaller distances --> add easier workouts <-- also set a hardcoded ceiling dlifficultyMultiplier that warns user that the workout is too hard, do the workout at their own risk <-- see long-ass WA discussion
cycles of 3
convert Jlow's fillerworkout notes to unit tests
Some no-gos
1. The average time is more than 5% off the goal pace but the workout score is more than 95
2. The workout score exceeds 120(unless the values are ridiculous)
3. The training times/variance are not a realistic number(like 90s SD for 500m trainings)
Improve on overall fitness calculator
suggest pyramid workout
implement something like a linked list for filler workouts instead of the stupid 'end' field and having multiple objects
need to save goal and tempo pace as well
Avoid suggesting a workout that is too close to the IPPT date
Migrate database from weeks to timestamp for existing users
Original interval workout code suggester to use timestamp
Workout Frequency is 0 < x <= 3
Save the date of the first interval workout, change number of weeks to specific date
Basically we also need to regenerate the suggested workout everytime the user logs in, since the current date will change
Upon previous filler workout success
In fartlek workouts, rename goalPace to targetPace
*/
import {scoredWorkouts} from "./workoutScorer.js";
// All constants below TBC
const rho = 7.0;
// tePace, ltPace, vPace, stPace
const phi = [1, 1, 1, 1];
const paceConstants = [1.243, 1.19, 1.057, 0.89];
export const deltas = [0.41, 0.49, 0.55, 0.65, 0.73];
const getPace = (time) => time / 2400;
const convertSecToHour = (timeInSec) => timeInSec / (60 * 60);
export const getTargetPaces = (targetTime) => {
const targetPace = getPace(targetTime);
const displayPace = Math.floor(targetPace * 100 * 1000);
return {targetPace, displayPace}
}
export const convertToVelocity = (currentTime) => 2.4 / convertSecToHour(currentTime);
export const getPrescribedRest = (restMultiple, targetPace) => Math.round((restMultiple * targetPace * 100) / 5) * 5;
const restRatio = (restMultiple, targetPace) =>
getPrescribedRest(restMultiple, targetPace) / (restMultiple * targetPace * 100);
const restMultiplier = (workout, targetPace) => 1 / Math.exp(0.0024 * restRatio(workout.parts[0]["restMultiplier"], targetPace));
const convertToSeconds = (input) => {
return input.split(":").reduce((acc, ele, i) => {
if (i === 0) {
return acc + Number(ele * 60);
} else return acc + Number(ele);
}, 0);
}
const sanitiseWeekDateStamp = (timestamp) => timestamp - (timestamp % 86400000)
export const getPaces = (targetPace, cNewbieGains) =>
phi.map((phiValue, i) => targetPace * paceConstants[i] * cNewbieGains * phiValue)
export const getVelocities = (paces) =>
paces.map((pace) => (1 / pace) * 3.6);
const intermediateFunc = (delta, velocityOne, velocityTwo) =>
delta * velocityOne * Math.exp(velocityTwo - velocityOne);
export const getOverallFitness = (speedDifficulty, duration, currentFitness, previousWorkout) => {
const deltaDifficulty = speedDifficulty - 100;
const deltaDifficultyPerWeek = deltaDifficulty / duration;
if (Object.keys(previousWorkout).length < 1) return {newFitness: 100, targetDifficulty: 100 + deltaDifficultyPerWeek};
const previousWorkoutScore = scoredWorkouts(previousWorkout);
if (previousWorkoutScore.workoutScore < 94) {
return {newFitness: currentFitness, targetDifficulty: currentFitness + deltaDifficultyPerWeek};
}
return {
newFitness: previousWorkout.personalisedDifficultyMultiplier,
targetDifficulty: previousWorkout.personalisedDifficultyMultiplier + deltaDifficultyPerWeek,
};
/* To update code above with the following decision logic
1. Decision logic to determine if workout is a success or failure
a. If workout score is between 94-105, workout=success
b. If workout score < 94, workout=fail
c. If workout score > 105, workout= breakthrough
2. If workout turns out to be a failure or success
a. if previous workout is success, continue with next workout, change nothing
b. if workout is a failure and fail_count == 0,
i. Set k = 1.2, fail_count++
c. if workout is a failure and fail_count == 1,
i. Set x = P(avg)
d. if workout is a breakthrough and breakthrough_count == 0,
i. breakthrough count++
e. if workout is a breakthrough and breakthrough _count == 1,
i. Set x = P(avg)
*/
};
/// for the first time we are calling get_diffs, we use 100. For the second stage, we use the calculated diff
const checkDiff = (diffs, diff) => {
if (diffs[diff]) {
return diffs[diff];
}
return 100;
};
const getDiffs = (velocityToCompare, velocities, intermediateFunc, x = 1, differences = {}) => {
let diffs = {};
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
if (velocityToCompare < teVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") +
x * (deltas[0] * teVelocity * Math.exp(teVelocity - velocityToCompare));
} else if (velocityToCompare < ltVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") -
x * intermediateFunc(deltas[1], teVelocity, velocityToCompare);
} else if (velocityToCompare < vVelocity) {
diffs.ltDiff =
checkDiff(differences, "ltDiff") -
x * intermediateFunc(deltas[2], ltVelocity, velocityToCompare);
} else if (velocityToCompare < stVelocity) {
diffs.vDiff =
checkDiff(differences, "vDiff") -
x * intermediateFunc(deltas[3], vVelocity, velocityToCompare);
// console.log(checkDiff(differences, 'vDiff'))
} else {
diffs.stDiff =
checkDiff(differences, "stDiff") -
x * intermediateFunc(deltas[4], stVelocity, velocityToCompare);
}
return diffs;
};
export const calculateDifficulties = (velocities, currentVelocity) => {
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
const diffs = getDiffs(currentVelocity, velocities, intermediateFunc);
while (Object.keys(diffs).length < 4) {
if (diffs.teDiff && !diffs.ltDiff) {
diffs.ltDiff = diffs.teDiff + intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (diffs.ltDiff && !(diffs.teDiff && diffs.vDiff)) {
if (!diffs.teDiff) {
diffs.teDiff = diffs.ltDiff - intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (!diffs.vDiff) {
diffs.vDiff = diffs.ltDiff + intermediateFunc(deltas[2], ltVelocity, vVelocity);
}
}
if (diffs.vDiff && !(diffs.ltDiff && diffs.stDiff)) {
if (!diffs.ltDiff) {
diffs.ltDiff = diffs.vDiff - intermediateFunc(deltas[2], ltVelocity, vVelocity);
}
if (!diffs.stDiff) {
diffs.stDiff = diffs.vDiff + intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
if (diffs.stDiff && !diffs.vDiff) {
diffs.vDiff = diffs.stDiff - intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
return diffs;
}
export const getSpeedDifficulty = (currentVelocity, targetVelocity, velocities) => {
//todo why so many diffs. floating around? get rid of them
const diffs = calculateDifficulties(velocities, currentVelocity);
const finalDiffs = getDiffs(targetVelocity, velocities, intermediateFunc, -1, diffs);
if (Object.values(finalDiffs).length === 1) {
return Object.values(finalDiffs)[0];
}
return 0;
};
export const generateConstants = (questionnaireData) => {
//todo verify personalbests below
const beta = questionnaireData.regular ? 1 : 0.975;
const alpha = Math.max(
0,
Math.min(
1,
(1 / 3) *
beta *
((questionnaireData.frequency * questionnaireData.distance) / 30 +
questionnaireData.experience / 36 +
questionnaireData.frequency / 3)
)
);
/* old code
Math.min(
1,
(1 / 3) *
beta *
((answers.fFrequency * answers.dDistance) / 30 +
answers.lMonths / 36 +
answers.fFrequency / 3)
)
);
*/
const cNewbieGains = (1 / rho) * Math.exp(1 - alpha) + (rho - 1) / rho;
return {alpha, beta, cNewbieGains};
};
// todo edit this again
const getBestTrainingPlan = (trainingPlanPrimary, trainingPlanSecondary) =>
trainingPlanPrimary[0] > trainingPlanSecondary[0] /*&&
trainingPlanPrimary[0] - trainingPlanSecondary[0] < 3 &&
trainingPlanPrimary[1]["personalisedDifficultyMultiplier"] <
trainingPlanSecondary[1]["personalisedDifficultyMultiplier"];*/
export function | (questionnaireData, previousFitness) {
const {duration, workoutFrequency} = questionnaireData
//todo fix currentFitness
return {
currentTime: convertToSeconds(questionnaireData.latest),
targetTime: convertToSeconds(questionnaireData.target),
duration,
workoutFrequency,
currentFitness: previousFitness,
};
}
export const generateTrainingPlans = (speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout) => {
const {newFitness, targetDifficulty} = getOverallFitness(
speedDifficulty,
userInfo.duration,
userInfo.currentFitness,
previousWorkout,
);
const getPersonalisedDifficulty = (workout) => {
const temp = JSON.parse(JSON.stringify(workout));
temp.personalisedDifficultyMultiplier =
(speedDifficulty / 100) * workout.difficultyMultiplier * restMultiplier(workout, targetPace); // * 100
return temp;
};
const reducer = (variance, workout) => {
const workoutVariance = Math.abs(workout.personalisedDifficultyMultiplier - targetDifficulty);
if (workoutVariance > variance[0]) {
return variance;
}
return [workoutVariance, workout]; //return [workoutVariance, ...workout];
};
const primaryIntervalsCopy = primary.map(getPersonalisedDifficulty);
const secondaryIntervalsCopy = secondary.map(getPersonalisedDifficulty);
const trainingPlanPrimary = primaryIntervalsCopy.reduce(reducer, [10000]);
const trainingPlanSecondary = secondaryIntervalsCopy.reduce(reducer, [trainingPlanPrimary[1]]);
return {trainingPlanPrimary, trainingPlanSecondary, newFitness};
}
const getIntervalTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace) => {
const velocities = getVelocities(getPaces(targetPace, cNewbieGains));
// velocities in km/hr, paces in s/m
const speedDifficulty = getSpeedDifficulty(convertToVelocity(userInfo.currentTime), convertToVelocity(userInfo.targetTime), velocities); // getSpeedDifficulty(currentVelocity, paces);
const {
trainingPlanPrimary,
trainingPlanSecondary,
newFitness
} = generateTrainingPlans(speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout);
// console.log(JSON.stringify(trainingPlanPrimary), JSON.stringify(trainingPlanSecondary))
let trainingPlan = getBestTrainingPlan(trainingPlanPrimary, trainingPlanSecondary)
? trainingPlanSecondary[1]
: trainingPlanPrimary[1];
trainingPlan.parts[0]["rest"] = getPrescribedRest(trainingPlan.parts[0]["restMultiplier"], targetPace);
trainingPlan.parts[0]["pace"] = displayPace
return {newFitness, trainingPlan};
}
const readJSON = async (name) => {
return fetch("./" + name + ".json")
.then(async response => {
return await response.json();
})
}
const getRoundedDistance = (time, tempoPace) => Math.ceil((time * 60 / tempoPace) / 0.5) * 0.5;
const getFartlekWorkout = (fillerWorkout, tempoPace, targetPace) => {
let jogTime, jogDistance, jogPace
const {sprintDistance} = fillerWorkout.parts[0]
const jogPaceFunction = (jogPaceString) => new Function('tempoPace', jogPaceString)
/* the Python version had a better implementation of jogPaceFunction --> rather than adding a string to the database, we simply create an array like this ['tempoPace', '0.5'] where 0.5 is meant to be added to tempoPace
def get_sprint_pace(sprintPace):
y = 0
for x in sprintPace:
if x == 'goalPace':
y += goalPace
else:
y += int(x)
return y
*/
if (fillerWorkout.parts[0].jogByTime) {
jogTime = fillerWorkout.parts[0].jogByTime
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogDistance = jogTime / jogPace
} else if (fillerWorkout.parts[0].jogByDistance) {
jogDistance = fillerWorkout.parts[0].jogByDistance
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogTime = jogDistance * jogPace
}
const sprintPaceFunction = (sprintPaceString) => new Function('targetPace', sprintPaceString) //same as jogPaceFunction
const sprintPace = sprintPaceFunction(fillerWorkout.parts[0].sprintPace)(targetPace)
return { //pace in s/m, distance in m, time in s
sprintDistance,
jogTime,
jogPace,
sprintPace,
jogDistance
}
}
const getFartlekTrainingPlan = async (alpha, weekNumber, tempoPace, targetPace) => {
// const fartlek = require('./fartlek.json')
const fartlek = await readJSON('fartlek')
for (let i = 0; i < fartlek.length; i++) {
const fillerWorkout = fartlek[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets: fillerWorkout.parts[0].sprintSets}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
if (alpha < 0.8) {
const sprintSets = fillerWorkout.parts[0].sprintSets + weekNumber - fillerWorkout.parts[0].weekAt
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets}
}
const fartlekWorkout = getFartlekWorkout(fillerWorkout, tempoPace, targetPace)
const newSprintPace = fartlekWorkout.sprintPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 0.00250
return {...fartlekWorkout, sprintPace: newSprintPace, sprintSets: fillerWorkout.parts[0].sprintSets}
}
}
}
}
const getLongDistanceTrainingPlan = async (alpha, weekNumber, tempoPace) => {
// const longDistance = require('./longDistance.json')
const longDistance = await readJSON('longDistance')
for (let i = 0; i < longDistance.length; i++) {
const fillerWorkout = longDistance[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
const convertedTempoPace = tempoPace * 1000
return { //runTime in min, tempoPace in s/m, distance in km
runTime: fillerWorkout.parts[0].runTime,
tempoPace,
distance: getRoundedDistance(fillerWorkout.parts[0].runTime, convertedTempoPace)
}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
const convertedTempoPace = tempoPace * 1000
const tempoPaceNew = convertedTempoPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 3
const runTime = fillerWorkout.parts[0].runTime
const distance = getRoundedDistance(runTime, tempoPaceNew)
return {distance, runTime, tempoPace: tempoPaceNew / 1000}
}
}
}
}
const getWeeksAndStartDate = (firstWorkoutTimestamp, currentDatestamp) => {
let numberOfWeeksElapsed = 0
let weekStartDatestamp = firstWorkoutTimestamp
while (weekStartDatestamp < currentDatestamp) {
numberOfWeeksElapsed++
weekStartDatestamp += (604800000 * numberOfWeeksElapsed)
}
return {numberOfWeeksElapsed, weekStartDatestamp}
}
const getNextDate = (dateToCompare, previousWorkoutDate) => {
if ((dateToCompare - sanitiseWeekDateStamp(previousWorkoutDate)) < 86400000) return dateToCompare + 86400000
return dateToCompare
}
//todo this function is using test values
const getSuggestedDate = (userInfo, previousWorkout) => {
const sanitisedCurrentDatestamp = sanitiseWeekDateStamp(Date.now())
const {ipptDatestamp} = userInfo
//if close to IPPT date
if ((sanitiseWeekDateStamp(ipptDatestamp) - sanitisedCurrentDatestamp) < (86400000 * 2)) return null
if (!!(previousWorkout.workout_ID && previousWorkout.workout_ID.match(/^4]/)[0])) {
const firstWorkoutTimestamp = parseInt('1622542227000')
const currentDatestamp = Date.now()
let {numberOfWeeksElapsed} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
return getNextDate(nextWeekStart, previousWorkout.date)
}
return getNextDate(sanitisedCurrentDatestamp, previousWorkout.date)
// return getNextDate(sanitisedCurrentDatestamp, Date.now())
}
const getOneOfThreeTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek) => {
const firstWorkoutTimestamp = parseInt('1622542227000')
const {workoutFrequency, ipptDatestamp} = userInfo
const currentDatestamp = Date.now()
userInfo.duration = 8//todo Math.floor(ipptDatestamp - currentDatestamp)
const previousWorkoutDatestamp = previousWorkout ? previousWorkout.date : ''
let {numberOfWeeksElapsed, weekStartDatestamp} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
weekStartDatestamp = sanitiseWeekDateStamp(weekStartDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
const tempoPace = getPaces(targetPace, cNewbieGains)[0]
const isPreviousWorkoutIntervalWorkout = !!(previousWorkout.workout_ID && previousWorkout.workout_ID.match(/^[123]/)[0])
if ((ipptDatestamp - currentDatestamp) < 604800000) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
if (workoutFrequency === 1 || !(Object.keys(previousWorkout).length > 0)) return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace);
if (workoutFrequency === 2) {
if (isPreviousWorkoutIntervalWorkout && previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
}
if (workoutFrequency === 3) {
if (previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
}
return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace)
}
export const getTrainingPlan = (questionnaireData, workouts, previousWorkout = {}, previousFitness = 100) => {
const [primary, secondary, pyramid, longDistance, fartlek] = workouts
if (questionnaireData.regular) {
//TBC logic
}
const userInfo = getUserInfo(questionnaireData, previousFitness);
const {alpha, beta, cNewbieGains} = generateConstants(questionnaireData);
const {targetPace, displayPace} = getTargetPaces(userInfo.targetTime);
const suggestedDate = getSuggestedDate(userInfo, previousWorkout)
const {
newFitness,
trainingPlan
} = getOneOfThreeTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek);
return {newFitness, trainingPlan, suggestedDate};
};
export async function getJSON(url) {
try {
const raw = await fetch(url);
return await raw.json();
} catch (error) {
throw error;
}
}
const testObj = {
permSetCount: "9",
permDistance: "300",
timings: ["63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75"], //todo for now code below assumes these set times are in seconds
permPaceTime: "21250", //in ms
};
const myResults = {
difficultyMultiplier: 69,
personalisedDifficultyMultiplier: 119,
segment: "secondary",
type: "Distance Interval",
workout_ID: "2003",
parts: [
{
part_ID: "2003_0",
distance: 400,
pace: 25000,
rest: 60,
restMultiplier: 2.33,
sets: 7,
timings: [125000, 110000, 120000, 108000, 102000, 123333],
},
],
};
// const testObj = {
// userProfile: {
// displayName: "Elon Musk",
// email: "[email protected]",
// bio: "I like running!",
// dp: "https://techcrunch.com/wp-content/uploads/2021/01/GettyImages-1229901940.jpg?w=730&crop=1",
// badges: ["Top Runner", "5K Finisher", "IPPT Gold"],
// },
// workout: {
// date: Date.now(),
// difficultyMultiplier: 91,
// review: {
// feeling: "good",
// reflection: "This was one of the toughest training I have faced in a long time.",
// },
// parts: [
// {
// distance: 300,
// pace: 25000,
// part_ID: "1006_0",
// rest: 115,
// restMultiplier: 4.5,
// sets: 8,
// timings: [82000, 207000, 83000, 79430, 78236],
// },
// ],
// personalisedDifficultyMultiplier: 160.0173922309409,
// segment: "primary",
// type: "Distance Interval",
// workout_ID: "1006",
// feeling: "good",
// },
// comments: {
// adfalksdfh: {
// time: Date.now(),
// text: "Daddy Musk is the best!",
// userProfile: {
// displayName: "X Æ A-12",
// email: "[email protected]",
// bio: "I am human!!! MUHAHAHA",
// badges: ["Baby Musk", "Best Name", "5m Finisher"],
// dp: "https://static.scientificamerican.com/blogs/cache/file/7069F0BB-A9AB-4932-84F508BBC0136458_source.jpg?w=590&h=800&F754D658-CE37-41EE-BE2C0EFC7134D582",
// },
// },
// },
// }; | getUserInfo | identifier_name |
Helper.js | /*
Also Jlow wants to implement a way to change the workout scorer and generator both if user chooses his own custom rest time
Commitment per week - if it changes halfway through --> skip workout --> if workout skipped or if current date doesn't match the date the next workout was supposed to happen calculated from date when last workout was done, change currentFitness
Fix the logic to suggest the second workout - Fix the way we implement workout score, workout difficulty, current fitness and target fitness. Figure out how to update current fitness, whether we are using the proper target fitness calculated from deltaDiff + workoutScore or deltaDiff + workout_difficulty to suggest the next workout, do we need to save current or target fitness
Jlow said high standard deviation (uneven set times) should result in a higher workout score. See results of beta test
Fitness degradation curve
edge cases - 20 min current vs 10 target --> suggest easier workout in the sense that he does smaller distances --> add easier workouts <-- also set a hardcoded ceiling dlifficultyMultiplier that warns user that the workout is too hard, do the workout at their own risk <-- see long-ass WA discussion
cycles of 3
convert Jlow's fillerworkout notes to unit tests
Some no-gos
1. The average time is more than 5% off the goal pace but the workout score is more than 95
2. The workout score exceeds 120(unless the values are ridiculous)
3. The training times/variance are not a realistic number(like 90s SD for 500m trainings)
Improve on overall fitness calculator
suggest pyramid workout
implement something like a linked list for filler workouts instead of the stupid 'end' field and having multiple objects
need to save goal and tempo pace as well
Avoid suggesting a workout that is too close to the IPPT date
Migrate database from weeks to timestamp for existing users
Original interval workout code suggester to use timestamp
Workout Frequency is 0 < x <= 3
Save the date of the first interval workout, change number of weeks to specific date
Basically we also need to regenerate the suggested workout everytime the user logs in, since the current date will change
Upon previous filler workout success
In fartlek workouts, rename goalPace to targetPace
*/
import {scoredWorkouts} from "./workoutScorer.js";
// All constants below TBC
const rho = 7.0;
// tePace, ltPace, vPace, stPace
const phi = [1, 1, 1, 1];
const paceConstants = [1.243, 1.19, 1.057, 0.89];
export const deltas = [0.41, 0.49, 0.55, 0.65, 0.73];
const getPace = (time) => time / 2400;
const convertSecToHour = (timeInSec) => timeInSec / (60 * 60);
export const getTargetPaces = (targetTime) => {
const targetPace = getPace(targetTime);
const displayPace = Math.floor(targetPace * 100 * 1000);
return {targetPace, displayPace}
}
export const convertToVelocity = (currentTime) => 2.4 / convertSecToHour(currentTime);
export const getPrescribedRest = (restMultiple, targetPace) => Math.round((restMultiple * targetPace * 100) / 5) * 5;
const restRatio = (restMultiple, targetPace) =>
getPrescribedRest(restMultiple, targetPace) / (restMultiple * targetPace * 100);
const restMultiplier = (workout, targetPace) => 1 / Math.exp(0.0024 * restRatio(workout.parts[0]["restMultiplier"], targetPace));
const convertToSeconds = (input) => {
return input.split(":").reduce((acc, ele, i) => {
if (i === 0) {
return acc + Number(ele * 60);
} else return acc + Number(ele);
}, 0);
}
const sanitiseWeekDateStamp = (timestamp) => timestamp - (timestamp % 86400000)
export const getPaces = (targetPace, cNewbieGains) =>
phi.map((phiValue, i) => targetPace * paceConstants[i] * cNewbieGains * phiValue)
export const getVelocities = (paces) =>
paces.map((pace) => (1 / pace) * 3.6);
const intermediateFunc = (delta, velocityOne, velocityTwo) =>
delta * velocityOne * Math.exp(velocityTwo - velocityOne);
export const getOverallFitness = (speedDifficulty, duration, currentFitness, previousWorkout) => {
const deltaDifficulty = speedDifficulty - 100;
const deltaDifficultyPerWeek = deltaDifficulty / duration;
if (Object.keys(previousWorkout).length < 1) return {newFitness: 100, targetDifficulty: 100 + deltaDifficultyPerWeek};
const previousWorkoutScore = scoredWorkouts(previousWorkout);
if (previousWorkoutScore.workoutScore < 94) {
return {newFitness: currentFitness, targetDifficulty: currentFitness + deltaDifficultyPerWeek};
}
return {
newFitness: previousWorkout.personalisedDifficultyMultiplier,
targetDifficulty: previousWorkout.personalisedDifficultyMultiplier + deltaDifficultyPerWeek,
};
/* To update code above with the following decision logic
1. Decision logic to determine if workout is a success or failure
a. If workout score is between 94-105, workout=success
b. If workout score < 94, workout=fail
c. If workout score > 105, workout= breakthrough
2. If workout turns out to be a failure or success
a. if previous workout is success, continue with next workout, change nothing
b. if workout is a failure and fail_count == 0,
i. Set k = 1.2, fail_count++
c. if workout is a failure and fail_count == 1,
i. Set x = P(avg)
d. if workout is a breakthrough and breakthrough_count == 0,
i. breakthrough count++
e. if workout is a breakthrough and breakthrough _count == 1,
i. Set x = P(avg)
*/
};
/// for the first time we are calling get_diffs, we use 100. For the second stage, we use the calculated diff
const checkDiff = (diffs, diff) => {
if (diffs[diff]) {
return diffs[diff];
}
return 100;
};
const getDiffs = (velocityToCompare, velocities, intermediateFunc, x = 1, differences = {}) => {
let diffs = {};
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
if (velocityToCompare < teVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") +
x * (deltas[0] * teVelocity * Math.exp(teVelocity - velocityToCompare));
} else if (velocityToCompare < ltVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") -
x * intermediateFunc(deltas[1], teVelocity, velocityToCompare);
} else if (velocityToCompare < vVelocity) {
diffs.ltDiff =
checkDiff(differences, "ltDiff") -
x * intermediateFunc(deltas[2], ltVelocity, velocityToCompare);
} else if (velocityToCompare < stVelocity) {
diffs.vDiff =
checkDiff(differences, "vDiff") -
x * intermediateFunc(deltas[3], vVelocity, velocityToCompare);
// console.log(checkDiff(differences, 'vDiff'))
} else {
diffs.stDiff =
checkDiff(differences, "stDiff") -
x * intermediateFunc(deltas[4], stVelocity, velocityToCompare);
}
return diffs;
};
export const calculateDifficulties = (velocities, currentVelocity) => {
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
const diffs = getDiffs(currentVelocity, velocities, intermediateFunc);
while (Object.keys(diffs).length < 4) {
if (diffs.teDiff && !diffs.ltDiff) {
diffs.ltDiff = diffs.teDiff + intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (diffs.ltDiff && !(diffs.teDiff && diffs.vDiff)) {
if (!diffs.teDiff) {
diffs.teDiff = diffs.ltDiff - intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (!diffs.vDiff) {
diffs.vDiff = diffs.ltDiff + intermediateFunc(deltas[2], ltVelocity, vVelocity);
} | }
if (!diffs.stDiff) {
diffs.stDiff = diffs.vDiff + intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
if (diffs.stDiff && !diffs.vDiff) {
diffs.vDiff = diffs.stDiff - intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
return diffs;
}
export const getSpeedDifficulty = (currentVelocity, targetVelocity, velocities) => {
//todo why so many diffs. floating around? get rid of them
const diffs = calculateDifficulties(velocities, currentVelocity);
const finalDiffs = getDiffs(targetVelocity, velocities, intermediateFunc, -1, diffs);
if (Object.values(finalDiffs).length === 1) {
return Object.values(finalDiffs)[0];
}
return 0;
};
export const generateConstants = (questionnaireData) => {
//todo verify personalbests below
const beta = questionnaireData.regular ? 1 : 0.975;
const alpha = Math.max(
0,
Math.min(
1,
(1 / 3) *
beta *
((questionnaireData.frequency * questionnaireData.distance) / 30 +
questionnaireData.experience / 36 +
questionnaireData.frequency / 3)
)
);
/* old code
Math.min(
1,
(1 / 3) *
beta *
((answers.fFrequency * answers.dDistance) / 30 +
answers.lMonths / 36 +
answers.fFrequency / 3)
)
);
*/
const cNewbieGains = (1 / rho) * Math.exp(1 - alpha) + (rho - 1) / rho;
return {alpha, beta, cNewbieGains};
};
// todo edit this again
const getBestTrainingPlan = (trainingPlanPrimary, trainingPlanSecondary) =>
trainingPlanPrimary[0] > trainingPlanSecondary[0] /*&&
trainingPlanPrimary[0] - trainingPlanSecondary[0] < 3 &&
trainingPlanPrimary[1]["personalisedDifficultyMultiplier"] <
trainingPlanSecondary[1]["personalisedDifficultyMultiplier"];*/
export function getUserInfo(questionnaireData, previousFitness) {
const {duration, workoutFrequency} = questionnaireData
//todo fix currentFitness
return {
currentTime: convertToSeconds(questionnaireData.latest),
targetTime: convertToSeconds(questionnaireData.target),
duration,
workoutFrequency,
currentFitness: previousFitness,
};
}
export const generateTrainingPlans = (speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout) => {
const {newFitness, targetDifficulty} = getOverallFitness(
speedDifficulty,
userInfo.duration,
userInfo.currentFitness,
previousWorkout,
);
const getPersonalisedDifficulty = (workout) => {
const temp = JSON.parse(JSON.stringify(workout));
temp.personalisedDifficultyMultiplier =
(speedDifficulty / 100) * workout.difficultyMultiplier * restMultiplier(workout, targetPace); // * 100
return temp;
};
const reducer = (variance, workout) => {
const workoutVariance = Math.abs(workout.personalisedDifficultyMultiplier - targetDifficulty);
if (workoutVariance > variance[0]) {
return variance;
}
return [workoutVariance, workout]; //return [workoutVariance, ...workout];
};
const primaryIntervalsCopy = primary.map(getPersonalisedDifficulty);
const secondaryIntervalsCopy = secondary.map(getPersonalisedDifficulty);
const trainingPlanPrimary = primaryIntervalsCopy.reduce(reducer, [10000]);
const trainingPlanSecondary = secondaryIntervalsCopy.reduce(reducer, [trainingPlanPrimary[1]]);
return {trainingPlanPrimary, trainingPlanSecondary, newFitness};
}
const getIntervalTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace) => {
const velocities = getVelocities(getPaces(targetPace, cNewbieGains));
// velocities in km/hr, paces in s/m
const speedDifficulty = getSpeedDifficulty(convertToVelocity(userInfo.currentTime), convertToVelocity(userInfo.targetTime), velocities); // getSpeedDifficulty(currentVelocity, paces);
const {
trainingPlanPrimary,
trainingPlanSecondary,
newFitness
} = generateTrainingPlans(speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout);
// console.log(JSON.stringify(trainingPlanPrimary), JSON.stringify(trainingPlanSecondary))
let trainingPlan = getBestTrainingPlan(trainingPlanPrimary, trainingPlanSecondary)
? trainingPlanSecondary[1]
: trainingPlanPrimary[1];
trainingPlan.parts[0]["rest"] = getPrescribedRest(trainingPlan.parts[0]["restMultiplier"], targetPace);
trainingPlan.parts[0]["pace"] = displayPace
return {newFitness, trainingPlan};
}
const readJSON = async (name) => {
return fetch("./" + name + ".json")
.then(async response => {
return await response.json();
})
}
const getRoundedDistance = (time, tempoPace) => Math.ceil((time * 60 / tempoPace) / 0.5) * 0.5;
const getFartlekWorkout = (fillerWorkout, tempoPace, targetPace) => {
let jogTime, jogDistance, jogPace
const {sprintDistance} = fillerWorkout.parts[0]
const jogPaceFunction = (jogPaceString) => new Function('tempoPace', jogPaceString)
/* the Python version had a better implementation of jogPaceFunction --> rather than adding a string to the database, we simply create an array like this ['tempoPace', '0.5'] where 0.5 is meant to be added to tempoPace
def get_sprint_pace(sprintPace):
y = 0
for x in sprintPace:
if x == 'goalPace':
y += goalPace
else:
y += int(x)
return y
*/
if (fillerWorkout.parts[0].jogByTime) {
jogTime = fillerWorkout.parts[0].jogByTime
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogDistance = jogTime / jogPace
} else if (fillerWorkout.parts[0].jogByDistance) {
jogDistance = fillerWorkout.parts[0].jogByDistance
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogTime = jogDistance * jogPace
}
const sprintPaceFunction = (sprintPaceString) => new Function('targetPace', sprintPaceString) //same as jogPaceFunction
const sprintPace = sprintPaceFunction(fillerWorkout.parts[0].sprintPace)(targetPace)
return { //pace in s/m, distance in m, time in s
sprintDistance,
jogTime,
jogPace,
sprintPace,
jogDistance
}
}
const getFartlekTrainingPlan = async (alpha, weekNumber, tempoPace, targetPace) => {
// const fartlek = require('./fartlek.json')
const fartlek = await readJSON('fartlek')
for (let i = 0; i < fartlek.length; i++) {
const fillerWorkout = fartlek[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets: fillerWorkout.parts[0].sprintSets}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
if (alpha < 0.8) {
const sprintSets = fillerWorkout.parts[0].sprintSets + weekNumber - fillerWorkout.parts[0].weekAt
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets}
}
const fartlekWorkout = getFartlekWorkout(fillerWorkout, tempoPace, targetPace)
const newSprintPace = fartlekWorkout.sprintPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 0.00250
return {...fartlekWorkout, sprintPace: newSprintPace, sprintSets: fillerWorkout.parts[0].sprintSets}
}
}
}
}
const getLongDistanceTrainingPlan = async (alpha, weekNumber, tempoPace) => {
// const longDistance = require('./longDistance.json')
const longDistance = await readJSON('longDistance')
for (let i = 0; i < longDistance.length; i++) {
const fillerWorkout = longDistance[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
const convertedTempoPace = tempoPace * 1000
return { //runTime in min, tempoPace in s/m, distance in km
runTime: fillerWorkout.parts[0].runTime,
tempoPace,
distance: getRoundedDistance(fillerWorkout.parts[0].runTime, convertedTempoPace)
}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
const convertedTempoPace = tempoPace * 1000
const tempoPaceNew = convertedTempoPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 3
const runTime = fillerWorkout.parts[0].runTime
const distance = getRoundedDistance(runTime, tempoPaceNew)
return {distance, runTime, tempoPace: tempoPaceNew / 1000}
}
}
}
}
const getWeeksAndStartDate = (firstWorkoutTimestamp, currentDatestamp) => {
let numberOfWeeksElapsed = 0
let weekStartDatestamp = firstWorkoutTimestamp
while (weekStartDatestamp < currentDatestamp) {
numberOfWeeksElapsed++
weekStartDatestamp += (604800000 * numberOfWeeksElapsed)
}
return {numberOfWeeksElapsed, weekStartDatestamp}
}
const getNextDate = (dateToCompare, previousWorkoutDate) => {
if ((dateToCompare - sanitiseWeekDateStamp(previousWorkoutDate)) < 86400000) return dateToCompare + 86400000
return dateToCompare
}
//todo this function is using test values
const getSuggestedDate = (userInfo, previousWorkout) => {
const sanitisedCurrentDatestamp = sanitiseWeekDateStamp(Date.now())
const {ipptDatestamp} = userInfo
//if close to IPPT date
if ((sanitiseWeekDateStamp(ipptDatestamp) - sanitisedCurrentDatestamp) < (86400000 * 2)) return null
if (!!(previousWorkout.workout_ID && previousWorkout.workout_ID.match(/^4]/)[0])) {
const firstWorkoutTimestamp = parseInt('1622542227000')
const currentDatestamp = Date.now()
let {numberOfWeeksElapsed} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
return getNextDate(nextWeekStart, previousWorkout.date)
}
return getNextDate(sanitisedCurrentDatestamp, previousWorkout.date)
// return getNextDate(sanitisedCurrentDatestamp, Date.now())
}
const getOneOfThreeTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek) => {
const firstWorkoutTimestamp = parseInt('1622542227000')
const {workoutFrequency, ipptDatestamp} = userInfo
const currentDatestamp = Date.now()
userInfo.duration = 8//todo Math.floor(ipptDatestamp - currentDatestamp)
const previousWorkoutDatestamp = previousWorkout ? previousWorkout.date : ''
let {numberOfWeeksElapsed, weekStartDatestamp} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
weekStartDatestamp = sanitiseWeekDateStamp(weekStartDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
const tempoPace = getPaces(targetPace, cNewbieGains)[0]
const isPreviousWorkoutIntervalWorkout = !!(previousWorkout.workout_ID && previousWorkout.workout_ID.match(/^[123]/)[0])
if ((ipptDatestamp - currentDatestamp) < 604800000) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
if (workoutFrequency === 1 || !(Object.keys(previousWorkout).length > 0)) return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace);
if (workoutFrequency === 2) {
if (isPreviousWorkoutIntervalWorkout && previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
}
if (workoutFrequency === 3) {
if (previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
}
return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace)
}
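// Summary of the branching above: within a week of the IPPT date the plan alternates
// long-distance and fartlek sessions; otherwise frequency 1 (or no previous workout)
// always yields an interval session, frequency 2 follows an interval in the same week
// with a long-distance run, and frequency 3 cycles interval, long distance, then fartlek.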
export const getTrainingPlan = (questionnaireData, workouts, previousWorkout = {}, previousFitness = 100) => {
const [primary, secondary, pyramid, longDistance, fartlek] = workouts
if (questionnaireData.regular) {
//TBC logic
}
const userInfo = getUserInfo(questionnaireData, previousFitness);
const {alpha, beta, cNewbieGains} = generateConstants(questionnaireData);
const {targetPace, displayPace} = getTargetPaces(userInfo.targetTime);
const suggestedDate = getSuggestedDate(userInfo, previousWorkout)
const {
newFitness,
trainingPlan
} = getOneOfThreeTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek);
return {newFitness, trainingPlan, suggestedDate};
};
export async function getJSON(url) {
try {
const raw = await fetch(url);
return await raw.json();
} catch (error) {
throw error;
}
}
const testObj = {
permSetCount: "9",
permDistance: "300",
timings: ["63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75", "63.75"], //todo for now code below assumes these set times are in seconds
permPaceTime: "21250", //in ms
};
const myResults = {
difficultyMultiplier: 69,
personalisedDifficultyMultiplier: 119,
segment: "secondary",
type: "Distance Interval",
workout_ID: "2003",
parts: [
{
part_ID: "2003_0",
distance: 400,
pace: 25000,
rest: 60,
restMultiplier: 2.33,
sets: 7,
timings: [125000, 110000, 120000, 108000, 102000, 123333],
},
],
};
// const testObj = {
// userProfile: {
// displayName: "Elon Musk",
// email: "[email protected]",
// bio: "I like running!",
// dp: "https://techcrunch.com/wp-content/uploads/2021/01/GettyImages-1229901940.jpg?w=730&crop=1",
// badges: ["Top Runner", "5K Finisher", "IPPT Gold"],
// },
// workout: {
// date: Date.now(),
// difficultyMultiplier: 91,
// review: {
// feeling: "good",
// reflection: "This was one of the toughest training I have faced in a long time.",
// },
// parts: [
// {
// distance: 300,
// pace: 25000,
// part_ID: "1006_0",
// rest: 115,
// restMultiplier: 4.5,
// sets: 8,
// timings: [82000, 207000, 83000, 79430, 78236],
// },
// ],
// personalisedDifficultyMultiplier: 160.0173922309409,
// segment: "primary",
// type: "Distance Interval",
// workout_ID: "1006",
// feeling: "good",
// },
// comments: {
// adfalksdfh: {
// time: Date.now(),
// text: "Daddy Musk is the best!",
// userProfile: {
// displayName: "X Æ A-12",
// email: "[email protected]",
// bio: "I am human!!! MUHAHAHA",
// badges: ["Baby Musk", "Best Name", "5m Finisher"],
// dp: "https://static.scientificamerican.com/blogs/cache/file/7069F0BB-A9AB-4932-84F508BBC0136458_source.jpg?w=590&h=800&F754D658-CE37-41EE-BE2C0EFC7134D582",
// },
// },
// },
// }; | }
if (diffs.vDiff && !(diffs.ltDiff && diffs.stDiff)) {
if (!diffs.ltDiff) {
diffs.ltDiff = diffs.vDiff - intermediateFunc(deltas[2], ltVelocity, vVelocity); | random_line_split |
knapsack.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put it in the same folder
/plt: create this subfolder in the same directory; plot files will be saved there
'''
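# Example environment setup (package names assumed from the requirements list above; TA-Lib
# also needs the underlying C library installed first, see the install link in the docstring):
#   pip install numpy pandas matplotlib scikit-learn keras quandl TA-Lib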
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
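# Example of the frame returned above (hypothetical ticker 'AAPL'): one row per date,
# columns renamed to 'adj_close-AAPL', 'diff-AAPL', 'sma15-AAPL', 'sma60-AAPL',
# 'rsi-AAPL' and 'atr-AAPL', with every feature already standardised by StandardScaler.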
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the asset
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
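# Worked example: with action == 1 at time_step 10 the code above writes
# signal.loc[10] = 100 (go long 100 shares), action == 2 writes -100 (go short),
# and anything else writes 0; the returned state is the single market-data slice
# xdata[time_step - 1:time_step, 0:1, :].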
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
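# Worked example for the training (eval == False) branch: holding 100 shares while the
# price moves from 100.0 to 101.5 gives reward = (101.5 - 100.0) * 100 = 150.0, so the
# agent is scored on single-step profit and loss; the eval branch instead returns the
# final cumulative P&L of the whole backtest.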
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size) | new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
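# Sketch of the compiled network (with num_features = 2544 as set above):
#   input (1, 2544) -> LSTM(64, return_sequences=True) -> Dropout(0.5)
#   -> LSTM(64) -> Dropout(0.5) -> Dense(4) with linear activation,
# i.e. one real-valued Q-value per action.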
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1: # the last epoch, use test data set
state, xdata, price_data = all_init_data()
else:
state, xdata, price_data = all_init_data(test=True)
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
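# Worked Q-target example (illustrative numbers): for a non-terminal state with
# reward = 150, gamma = 0.95 and maxQ = 200, update = 150 + 0.95 * 200 = 340; only the
# Q-value of the action actually taken is replaced, the other three targets keep the
# network's old predictions.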
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show() | action = (np.argmax(qval))
# Take action, observe new state S' | random_line_split |
knapsack.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put it in the same folder
/plt: create this subfolder in the same directory; plot files will be saved there
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
|
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the asset
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1: # the last epoch, use test data set
state, xdata, price_data = all_init_data()
else:
state, xdata, price_data = all_init_data(test=True)
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show()
| filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close | identifier_body |
knapsack.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put it in the same folder
/plt: create this subfolder in the same directory; plot files will be saved there
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def | (file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the asset
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1: # the last epoch, use test data set
state, xdata, price_data = all_init_data()
else:
state, xdata, price_data = all_init_data(test=True)
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show()
| read_file | identifier_name |
knapsack.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put it in the same folder
/plt: create this subfolder in the same directory; plot files will be saved there
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
|
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the asset
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1: # the last epoch, use test data set
state, xdata, price_data = all_init_data()
else:
state, xdata, price_data = all_init_data(test=True)
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show()
| return x_test, ticker | conditional_block |
DAC16bit.py | from qcodes.instrument.base import Instrument
import types
import logging
import numpy as np
import serial
import visa
import traceback
import threading
import time
from qcodes import VisaInstrument, validators as vals
from qcodes.instrument.parameter import ManualParameter
from qcodes.utils.validators import Bool, Numbers
Fullrange = 4000
Halfrange = Fullrange / 2
class IST_16(Instrument):
def __init__(self, name, interface='COM9', reset=False, numdacs=16, dac_step=10, dac_delay=.1, safe_version=True,
polarity=['BIP', 'BIP', 'BIP', 'BIP'],
use_locks=False, **kwargs):
t0 = time.time()
super().__init__(name, **kwargs)
| self.Halfrange = Halfrange
self.communication_bytes = 3
if numdacs % 4 == 0 and numdacs > 0:
self._numdacs = int(numdacs)
else:
logging.error('Number of dacs needs to be a multiple of 4')
# initialize pol_num, the voltage offset due to the polarity
self.pol_num = np.zeros(self._numdacs)
for i in range(int(self._numdacs / 4)):
self.set_pol_dacrack(polarity[i], np.arange(1 + i * 4, 1 + (i + 1) * 4),
get_all=False)
# Add functions
#self.add_function('get_all')
#self.add_function('set_dacs_zero')
#self.add_function('reinitialize_dacs')
for i in range(1, numdacs + 1):
self.add_parameter(
'dac{}'.format(i),
label='Dac {}'.format(i),
unit='mV',
get_cmd=self._gen_ch_get_func(self.do_get_dac, i),
set_cmd=self._gen_ch_set_func(self.do_set_dac, i),
vals=vals.Numbers(self.pol_num[i - 1]-1,
self.pol_num[i - 1] + self.Fullrange+1),
step=dac_step,
delay=dac_delay,
max_val_age=10)
self._open_serial_connection()
#open serial connection
def _open_serial_connection(self):
self.ser = serial.Serial()
self.ser.port = self._interface
self.ser.baudrate = 10000000
self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes
self.ser.parity = serial.PARITY_ODD #set parity check: no parity
self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits
self.ser.timeout = 1 #non-block read
self.ser.xonxoff = False #disable software flow control
self.ser.rtscts = False #disable hardware (RTS/CTS) flow control
self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
try:
self.ser.open()
except:
logging.warning('Error opening serial port')
print ('error opening serial port')
self.ser.close()
self.ser.open()
raise Exception()
if not self.ser.isOpen():
logging.error('Serial port not open')
print ('serial port not open')
raise Exception()
logging.info('Serial port opened: ' + self.ser.portstr)
# close serial connection
def _close_serial_connection(self):
'''
Closes the serial connection
Input:
None
Output:
None
'''
logging.debug('Closing serial connection')
print ('closing serial connection')
# vpp43.close(self._vi) # OLD
self.ser.close()
def reset(self):
'''
Resets all dacs to 0 volts
Input:
None
Output:
None
'''
logging.info('Resetting instrument')
self.set_dacs_zero()
self.get_all()
def set_dacs_zero(self):
for i in range(self._numdacs):
self.do_set_dac(0,i+1)
def reinitialize_dacs(self):
bytetosend = 0b11100000 #111 is a re init all dacs
message = "%c" % (bytetosend)
reply = self._send_and_read(message.encode(), self.communication_bytes)
return reply
# Conversion of data
def _mvoltage_to_bytes(self, mvoltage):
'''
Converts a mvoltage on the instrument's bipolar scale (-Halfrange to +Halfrange mV)
to the equivalent 16-bit DAC code
Input:
mvoltage (float) : a mvoltage in the -2000mV to +2000mV range (for the default 4000mV full range)
Output:
bytevalue (int) : the 16-bit integer equivalent (0..65535)
'''
#//+10V=01111111111111111111
#//-10V=10000000000000000000
# logging.info('mvoltstobytes, voltage:')
# logging.info(mvoltage)
# if(mvoltage>0):
# data = int(((float(mvoltage)/1000)+2)*((2**16-1)/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# #data = data | 0b10000000000000000000
# logging.info("negative, data:")
# #data = bin(data)
# logging.info(data)
#bytevalue = int(round(mvoltage/4000.0*65535))
#dataH = int(bytevalue/256)
#dataL = bytevalue - dataH*256
#return (dataH, dataL)
bytevalue = int(round((mvoltage+self.Halfrange) / self.Fullrange * 65535))
return bytevalue
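# Worked example for the default bipolar range (Fullrange = 4000, Halfrange = 2000):
#   -2000 mV -> round(0 / 4000 * 65535) = 0
#       0 mV -> round(2000 / 4000 * 65535) = 32768
#   +2000 mV -> round(4000 / 4000 * 65535) = 65535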
# def _numbers_to_mvoltages(self, numbers):
# '''
# Converts a list of bytes to a list containing
# the corresponding mvoltages
# '''
# values = np.ones(self._numdacs) #initializes the values array to all ones
# #//calculate the bits to send to the dac out of the input values
# #//D= 20bit input code
# for i in range(self._numdacs):
# bitValue = ((numbers[5 + 3*i]<<8) + (numbers[6 + 3*i]<<0))
# if (bitValue & 0b1000000000000000): #check if the number is positive
# #logging.info(i)
# #logging.info('negative number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=(float(bitValue)/((2**16-1)/2))*(self.Halfrange/1000) #multiply with 2V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# else:
# #logging.info(i)
# #logging.info('positive number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=-(self.Halfrange/1000)+(float(bitValue)/(65536.0/2))*(self.Halfrange/1000) #multiply with 10V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# #values[i] = int(((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)),2)-(1<<20)
# #values[i] = (( 20*((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)) )/ 1048575.0) + 10
# #logging.info('DAC: ')
# #logging.info(numbers[4 + 4*i] )
# #logging.info('Val: ')
# #logging.info(values[i])
# return values
# #return numbers
def _numbers_to_mvoltages(self, byte_mess):
'''
Converts a list of bytes to a list containing
the corresponding mvoltages
'''
values = list(range(self._numdacs))
for i in range(self._numdacs):
# takes two bytes, converts it to a 16 bit int and then divides by
# the range and adds the offset due to the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
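# Inverse of _mvoltage_to_bytes: e.g. the byte pair (0x80, 0x00) decodes to
# 32768 / 65535 * 4000 - 2000 ≈ 0.03 mV, so a mid-scale code reads back as (almost) 0 mV.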
# Communication with device
def do_get_dac(self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages[channel - 1]
def do_set_dac(self, mvoltage, channel):
'''
Sets the specified dac to the specified voltage
Input:
mvoltage (float) : output voltage in mV
channel (int) : 1 based index of the dac
Output:
reply (string) : errormessage
'''
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
#logging.info('mvoltage after m_to_bytes: ')
#logging.info(mvoltage)
#logging.info('bin(channel: ')
#logging.info(bin(channel))
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)] #0xff is 255
channel = (int(channel)-1) | 0b10000000 #100 is a write operation
#message = "%c%c%c%c" % (channel,mvoltage_bytes[0], mvoltage_bytes[1], mvoltage_bytes[2])
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
#logging.info('bin(message: ')
#logging.info(bin(mvoltage_bytes[0]))
#logging.info(bin(mvoltage_bytes[1]))
#logging.info(bin(mvoltage_bytes[2]))
#logging.info('message: ')
#logging.info(message)
reply = self._send_and_read(message, self.communication_bytes)
#logging.info('bin(reply: ')
#logging.info(bin(reply[0]))
#logging.info(bin(reply[1]))
#logging.info(bin(reply[2]))
#logging.info(bin(reply[3]))
return reply
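# Message framing as implemented above: byte 0 = 0b100ccccc (write opcode 100 plus the
# zero-based DAC number), bytes 1-2 = the 16-bit code, high byte first; e.g. setting
# DAC 1 to 0 mV sends [0x80, 0x80, 0x00].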
def do_set_dac_fast(self, mvoltage, channel): #added by Daniel, seems to work
if channel>4:
print('Error: Only channels 1-4 have fast setting.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b11000000 #110 is a write fast operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, 0)
return reply
def do_ramp_dac(self, mvoltage, channel): #added by Daniel, fucks it up completely right now...
if channel>2:
print('Error: Only channels 1-2 have ramping.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b10100000 #101 is a ramp operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, self.communication_bytes)
return reply
def do_set_trigger(self):
'''
Sets the trigger; trigger is 1ms and around 4.2V
Input:
none
Output:
reply (string) : errormessage
'''
logging.debug('Trigger out')
message = "%c%c%c%c" % (4, 0, 2, 6)
reply = self._send_and_read(message.encode(), self.communication_bytes) # bytestoread argument was missing; assumes the standard reply length
return reply
def get_dacs(self):
mvoltages = self._get_dacs()
for i in range(self._numdacs):
print('dac{}: '.format(i+1)+str(mvoltages[i]))
return mvoltages
def _get_dacs(self):
'''
Reads from device and returns all dacvoltages in a list
Input:
None
Output:
voltages (float[]) : list containing all dacvoltages (in mV)
'''
logging.debug('Getting dac voltages from instrument')
# first 3 bits are control, last 5 the DAC number
message = '\x40' #0b01000000 = 010 = read all dacs
#logging.info(sys.getsizeof(message))
reply = self._send_and_read(message.encode(), self._numdacs*self.communication_bytes+4)
#logging.info(reply)
mvoltages = self._numbers_to_mvoltages(reply)
return mvoltages
def _send_and_read(self, message, bytestoread):
'''
Send <message> to the device and read answer.
Raises an error if one occurred
Returns a list of bytes
Input:
message (string) : string conforming to the IST_20 protocol
Output:
data_out_numbers (int[]) : return message
'''
logging.info('Sending %r', message)
# clear input buffer
self.ser.flushInput()
#logging.info('Flushed input')
#vpp43.write(self._vi, message) # OLD
self.ser.write(message) # NEW
#logging.info('Wrote Message')
# Instead of blocking, we could also poll, but it's a bit slower
# print visafunc.get_navail(self.lib, self._vi)
# if not visafunc.wait_data(self._vi, 2, 0.5):
# logging.error('Failed to receive reply from IST_20 rack')
# return False
#data1 = visafunc.readn(self._vi, 2) # OLD
#sleep(2)
#logging.info(self.ser.readline())
s=0
data1 = []
while s < bytestoread:
data1.append(ord(self.ser.read()))
#logging.info(s)
s=s+1
#data1 = [ord(s) for s in data1]
#data2 = np.reshape(data1,(-1,4))
#logging.info('finished reading')
#data2 = np.uint32(data1) #from string to 32bit
data2 = data1
#logging.info('converted to uint32')
#logging.info('sendAndRead: %s', data2)
return data2
def set_pol_dacrack(self, flag, channels, get_all=True):
'''
Changes the polarity of the specified set of dacs
Input:
flag (string) : 'BIP', 'POS' or 'NEG'
channel (int) : 0 based index of the rack
get_all (boolean): if True (default) perform a get_all
Output:
None
'''
flagmap = {'NEG': -self.Fullrange, 'BIP': -self.Halfrange, 'POS': 0}
if flag.upper() not in flagmap:
raise KeyError('Tried to set invalid dac polarity %s' % flag)
val = flagmap[flag.upper()]
for ch in channels:
self.pol_num[ch - 1] = val
# self.set_parameter_bounds('dac%d' % (i+1), val, val +
# self.Fullrange.0)
if get_all:
self.get_all()
def get_pol_dac(self, channel):
'''
Returns the polarity of the dac channel specified
Input:
channel (int) : 1 based index of the dac
Output:
polarity (string) : 'BIP', 'POS' or 'NEG'
'''
val = self.pol_num[channel - 1]
if (val == -self.Fullrange):
return 'NEG'
elif (val == -self.Halfrange):
return 'BIP'
elif (val == 0):
return 'POS'
else:
return 'Invalid polarity in memory'
def get_numdacs(self):
'''
Get the number of DACS.
'''
return self._numdacs
def _gen_ch_set_func(self, fun, ch):
def set_func(val):
return fun(val, ch)
return set_func
def _gen_ch_get_func(self, fun, ch):
def get_func():
return fun(ch)
return get_func
def get_all(self):
return self.snapshot(update=True) | self._interface = interface
self.Fullrange = Fullrange | random_line_split |
DAC16bit.py | from qcodes.instrument.base import Instrument
import types
import logging
import numpy as np
import serial
import visa
import traceback
import threading
import time
from qcodes import VisaInstrument, validators as vals
from qcodes.instrument.parameter import ManualParameter
from qcodes.utils.validators import Bool, Numbers
Fullrange = 4000
Halfrange = Fullrange / 2
class IST_16(Instrument):
def __init__(self, name, interface = 'COM9', reset=False, numdacs=16, dac_step=10,dac_delay=.1, safe_version=True,
polarity=['BIP', 'BIP', 'BIP', 'BIP'],
use_locks=False,**kwargs):
t0 = time.time()
super().__init__(name, **kwargs)
self._interface = interface
self.Fullrange = Fullrange
self.Halfrange = Halfrange
self.communication_bytes = 3
if numdacs % 4 == 0 and numdacs > 0:
self._numdacs = int(numdacs)
else:
logging.error('Number of dacs needs to be multiple of 4')
# initialize pol_num, the voltage offset due to the polarity
self.pol_num = np.zeros(self._numdacs)
for i in range(int(self._numdacs / 4)):
self.set_pol_dacrack(polarity[i], np.arange(1 + i * 4, 1 + (i + 1) * 4),
get_all=False)
# Add functions
#self.add_function('get_all')
#self.add_function('set_dacs_zero')
#self.add_function('reinitialize_dacs')
for i in range(1, numdacs + 1):
self.add_parameter(
'dac{}'.format(i),
label='Dac {}'.format(i),
unit='mV',
get_cmd=self._gen_ch_get_func(self.do_get_dac, i),
set_cmd=self._gen_ch_set_func(self.do_set_dac, i),
vals=vals.Numbers(self.pol_num[i - 1]-1,
self.pol_num[i - 1] + self.Fullrange+1),
step=dac_step,
delay=dac_delay,
max_val_age=10)
self._open_serial_connection()
#open serial connection
def _open_serial_connection(self):
self.ser = serial.Serial()
self.ser.port = self._interface
self.ser.baudrate = 10000000
self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes
self.ser.parity = serial.PARITY_ODD #set parity check: odd parity
self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits
self.ser.timeout = 1 #non-block read
self.ser.xonxoff = False #disable software flow control
self.ser.rtscts = False #disable hardware (RTS/CTS) flow control
self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
try:
self.ser.open()
except:
logging.warning('Error opening serial port')
print ('error opening serial port')
self.ser.close()
self.ser.open()
raise Exception()
if not self.ser.isOpen():
logging.error('Serial port not open')
print ('serial port not open')
raise Exception()
logging.info('Serial port opened: ' + self.ser.portstr)
# close serial connection
def _close_serial_connection(self):
'''
Closes the serial connection
Input:
None
Output:
None
'''
logging.debug('Closing serial connection')
print ('closing serial connection')
# vpp43.close(self._vi) # OLD
self.ser.close()
def reset(self):
'''
Resets all dacs to 0 volts
Input:
None
Output:
None
'''
logging.info('Resetting instrument')
self.set_dacs_zero()
self.get_all()
def set_dacs_zero(self):
for i in range(self._numdacs):
self.do_set_dac(0,i+1)
def reinitialize_dacs(self):
bytetosend = 0b11100000 #111 is a re init all dacs
message = "%c" % (bytetosend)
reply = self._send_and_read(message.encode(), self.communication_bytes)
return reply
# Conversion of data
def _mvoltage_to_bytes(self, mvoltage):
'''
Converts a voltage in mV to its 16-bit integer equivalent
(the value is offset by Halfrange and scaled onto 0..65535)
Input:
mvoltage (float) : a voltage in mV within the configured output window
Output:
bytevalue (int) : 16-bit integer equivalent of the voltage
'''
#//+10V=01111111111111111111
#//-10V=10000000000000000000
# logging.info('mvoltstobytes, voltage:')
# logging.info(mvoltage)
# if(mvoltage>0):
# data = int(((float(mvoltage)/1000)+2)*((2**16-1)/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# #data = data | 0b10000000000000000000
# logging.info("negative, data:")
# #data = bin(data)
# logging.info(data)
#bytevalue = int(round(mvoltage/4000.0*65535))
#dataH = int(bytevalue/256)
#dataL = bytevalue - dataH*256
#return (dataH, dataL)
bytevalue = int(round((mvoltage+self.Halfrange) / self.Fullrange * 65535))
return bytevalue
# def _numbers_to_mvoltages(self, numbers):
# '''
# Converts a list of bytes to a list containing
# the corresponding mvoltages
# '''
# values = np.ones(self._numdacs) #initializes the values array to all ones
# #//calculate the bits to send to the dac out of the input values
# #//D= 20bit input code
# for i in range(self._numdacs):
# bitValue = ((numbers[5 + 3*i]<<8) + (numbers[6 + 3*i]<<0))
# if (bitValue & 0b1000000000000000): #check if the number is positive
# #logging.info(i)
# #logging.info('negative number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=(float(bitValue)/((2**16-1)/2))*(self.Halfrange/1000) #multiply with 2V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# else:
# #logging.info(i)
# #logging.info('positive number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=-(self.Halfrange/1000)+(float(bitValue)/(65536.0/2))*(self.Halfrange/1000) #multiply with 10V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# #values[i] = int(((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)),2)-(1<<20)
# #values[i] = (( 20*((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)) )/ 1048575.0) + 10
# #logging.info('DAC: ')
# #logging.info(numbers[4 + 4*i] )
# #logging.info('Val: ')
# #logging.info(values[i])
# return values
# #return numbers
def _numbers_to_mvoltages(self, byte_mess):
'''
Converts a list of bytes to a list containing
the corresponding mvoltages
'''
values = list(range(self._numdacs))
for i in range(self._numdacs):
# combine two bytes into a 16-bit int, scale it to the full range
# and subtract the half-range offset set by the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
# Communication with device
def | (self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages[channel - 1]
def do_set_dac(self, mvoltage, channel):
'''
Sets the specified dac to the specified voltage
Input:
mvoltage (float) : output voltage in mV
channel (int) : 1 based index of the dac
Output:
reply (string) : errormessage
'''
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
#logging.info('mvoltage after m_to_bytes: ')
#logging.info(mvoltage)
#logging.info('bin(channel: ')
#logging.info(bin(channel))
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)] #0xff is 255
channel = (int(channel)-1) | 0b10000000 #100 is a write operation
#message = "%c%c%c%c" % (channel,mvoltage_bytes[0], mvoltage_bytes[1], mvoltage_bytes[2])
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
#logging.info('bin(message: ')
#logging.info(bin(mvoltage_bytes[0]))
#logging.info(bin(mvoltage_bytes[1]))
#logging.info(bin(mvoltage_bytes[2]))
#logging.info('message: ')
#logging.info(message)
reply = self._send_and_read(message, self.communication_bytes)
#logging.info('bin(reply: ')
#logging.info(bin(reply[0]))
#logging.info(bin(reply[1]))
#logging.info(bin(reply[2]))
#logging.info(bin(reply[3]))
return reply
def do_set_dac_fast(self, mvoltage, channel): #added by Daniel, seems to work
if channel>4:
print('Error: Only channels 1-4 have fast setting.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b11000000 #110 is a write fast operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, 0)
return reply
def do_ramp_dac(self, mvoltage, channel): #added by Daniel, fucks it up completely right now...
if channel>2:
print('Error: Only channels 1-2 have ramping.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b10100000 #101 is a ramp operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, self.communication_bytes)
return reply
def do_set_trigger(self):
'''
Sets the trigger; trigger is 1ms and around 4.2V
Input:
none
Output:
reply (string) : errormessage
'''
logging.debug('Trigger out')
message = "%c%c%c%c" % (4, 0, 2, 6)
reply = self._send_and_read(message.encode(), self.communication_bytes) # bytestoread argument was missing; assumes the standard reply length
return reply
def get_dacs(self):
mvoltages = self._get_dacs()
for i in range(self._numdacs):
print('dac{}: '.format(i+1)+str(mvoltages[i]))
return mvoltages
def _get_dacs(self):
'''
Reads from device and returns all dacvoltages in a list
Input:
None
Output:
voltages (float[]) : list containing all dacvoltages (in mV)
'''
logging.debug('Getting dac voltages from instrument')
# first 3 bits are control, last 5 the DAC number
message = '\x40' #0b01000000 = 010 = read all dacs
#logging.info(sys.getsizeof(message))
reply = self._send_and_read(message.encode(), self._numdacs*self.communication_bytes+4)
#logging.info(reply)
mvoltages = self._numbers_to_mvoltages(reply)
return mvoltages
def _send_and_read(self, message, bytestoread):
'''
Send <message> to the device and read answer.
Raises an error if one occurred
Returns a list of bytes
Input:
message (string) : string conforming to the IST_20 protocol
Output:
data_out_numbers (int[]) : return message
'''
logging.info('Sending %r', message)
# clear input buffer
self.ser.flushInput()
#logging.info('Flushed input')
#vpp43.write(self._vi, message) # OLD
self.ser.write(message) # NEW
#logging.info('Wrote Message')
# Instead of blocking, we could also poll, but it's a bit slower
# print visafunc.get_navail(self.lib, self._vi)
# if not visafunc.wait_data(self._vi, 2, 0.5):
# logging.error('Failed to receive reply from IST_20 rack')
# return False
#data1 = visafunc.readn(self._vi, 2) # OLD
#sleep(2)
#logging.info(self.ser.readline())
s=0
data1 = []
while s < bytestoread:
data1.append(ord(self.ser.read()))
#logging.info(s)
s=s+1
#data1 = [ord(s) for s in data1]
#data2 = np.reshape(data1,(-1,4))
#logging.info('finished reading')
#data2 = np.uint32(data1) #from string to 32bit
data2 = data1
#logging.info('converted to uint32')
#logging.info('sendAndRead: %s', data2)
return data2
def set_pol_dacrack(self, flag, channels, get_all=True):
'''
Changes the polarity of the specified set of dacs
Input:
flag (string) : 'BIP', 'POS' or 'NEG'
channel (int) : 0 based index of the rack
get_all (boolean): if True (default) perform a get_all
Output:
None
'''
flagmap = {'NEG': -self.Fullrange, 'BIP': -self.Halfrange, 'POS': 0}
if flag.upper() not in flagmap:
raise KeyError('Tried to set invalid dac polarity %s' % flag)
val = flagmap[flag.upper()]
for ch in channels:
self.pol_num[ch - 1] = val
# self.set_parameter_bounds('dac%d' % (i+1), val, val +
# self.Fullrange.0)
if get_all:
self.get_all()
def get_pol_dac(self, channel):
'''
Returns the polarity of the dac channel specified
Input:
channel (int) : 1 based index of the dac
Output:
polarity (string) : 'BIP', 'POS' or 'NEG'
'''
val = self.pol_num[channel - 1]
if (val == -self.Fullrange):
return 'NEG'
elif (val == -self.Halfrange):
return 'BIP'
elif (val == 0):
return 'POS'
else:
return 'Invalid polarity in memory'
def get_numdacs(self):
'''
Get the number of DACS.
'''
return self._numdacs
def _gen_ch_set_func(self, fun, ch):
def set_func(val):
return fun(val, ch)
return set_func
def _gen_ch_get_func(self, fun, ch):
def get_func():
return fun(ch)
return get_func
def get_all(self):
return self.snapshot(update=True) | do_get_dac | identifier_name |
DAC16bit.py | from qcodes.instrument.base import Instrument
import types
import logging
import numpy as np
import serial
import visa
import traceback
import threading
import time
from qcodes import VisaInstrument, validators as vals
from qcodes.instrument.parameter import ManualParameter
from qcodes.utils.validators import Bool, Numbers
Fullrange = 4000
Halfrange = Fullrange / 2
class IST_16(Instrument):
def __init__(self, name, interface = 'COM9', reset=False, numdacs=16, dac_step=10,dac_delay=.1, safe_version=True,
polarity=['BIP', 'BIP', 'BIP', 'BIP'],
use_locks=False,**kwargs):
t0 = time.time()
super().__init__(name, **kwargs)
self._interface = interface
self.Fullrange = Fullrange
self.Halfrange = Halfrange
self.communication_bytes = 3
if numdacs % 4 == 0 and numdacs > 0:
self._numdacs = int(numdacs)
else:
logging.error('Number of dacs needs to be multiple of 4')
# initialize pol_num, the voltage offset due to the polarity
self.pol_num = np.zeros(self._numdacs)
for i in range(int(self._numdacs / 4)):
self.set_pol_dacrack(polarity[i], np.arange(1 + i * 4, 1 + (i + 1) * 4),
get_all=False)
# Add functions
#self.add_function('get_all')
#self.add_function('set_dacs_zero')
#self.add_function('reinitialize_dacs')
for i in range(1, numdacs + 1):
self.add_parameter(
'dac{}'.format(i),
label='Dac {}'.format(i),
unit='mV',
get_cmd=self._gen_ch_get_func(self.do_get_dac, i),
set_cmd=self._gen_ch_set_func(self.do_set_dac, i),
vals=vals.Numbers(self.pol_num[i - 1]-1,
self.pol_num[i - 1] + self.Fullrange+1),
step=dac_step,
delay=dac_delay,
max_val_age=10)
self._open_serial_connection()
#open serial connection
def _open_serial_connection(self):
self.ser = serial.Serial()
self.ser.port = self._interface
self.ser.baudrate = 10000000
self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes
self.ser.parity = serial.PARITY_ODD #set parity check: odd parity
self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits
self.ser.timeout = 1 #non-block read
self.ser.xonxoff = False #disable software flow control
self.ser.rtscts = False #disable hardware (RTS/CTS) flow control
self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
try:
self.ser.open()
except:
logging.warning('Error opening serial port')
print ('error opening serial port')
self.ser.close()
self.ser.open()
raise Exception()
if not self.ser.isOpen():
logging.error('Serial port not open')
print ('serial port not open')
raise Exception()
logging.info('Serial port opened: ' + self.ser.portstr)
# close serial connection
def _close_serial_connection(self):
'''
Closes the serial connection
Input:
None
Output:
None
'''
logging.debug('Closing serial connection')
print ('closing serial connection')
# vpp43.close(self._vi) # OLD
self.ser.close()
def reset(self):
'''
Resets all dacs to 0 volts
Input:
None
Output:
None
'''
logging.info('Resetting instrument')
self.set_dacs_zero()
self.get_all()
def set_dacs_zero(self):
for i in range(self._numdacs):
self.do_set_dac(0,i+1)
def reinitialize_dacs(self):
bytetosend = 0b11100000 #111 is a re init all dacs
message = "%c" % (bytetosend)
reply = self._send_and_read(message.encode(), self.communication_bytes)
return reply
# Conversion of data
def _mvoltage_to_bytes(self, mvoltage):
'''
Converts a voltage in mV to its 16-bit integer equivalent
(the value is offset by Halfrange and scaled onto 0..65535)
Input:
mvoltage (float) : a voltage in mV within the configured output window
Output:
bytevalue (int) : 16-bit integer equivalent of the voltage
'''
#//+10V=01111111111111111111
#//-10V=10000000000000000000
# logging.info('mvoltstobytes, voltage:')
# logging.info(mvoltage)
# if(mvoltage>0):
# data = int(((float(mvoltage)/1000)+2)*((2**16-1)/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# #data = data | 0b10000000000000000000
# logging.info("negative, data:")
# #data = bin(data)
# logging.info(data)
#bytevalue = int(round(mvoltage/4000.0*65535))
#dataH = int(bytevalue/256)
#dataL = bytevalue - dataH*256
#return (dataH, dataL)
bytevalue = int(round((mvoltage+self.Halfrange) / self.Fullrange * 65535))
return bytevalue
# def _numbers_to_mvoltages(self, numbers):
# '''
# Converts a list of bytes to a list containing
# the corresponding mvoltages
# '''
# values = np.ones(self._numdacs) #initializes the values array to all ones
# #//calculate the bits to send to the dac out of the input values
# #//D= 20bit input code
# for i in range(self._numdacs):
# bitValue = ((numbers[5 + 3*i]<<8) + (numbers[6 + 3*i]<<0))
# if (bitValue & 0b1000000000000000): #check if the number is positive
# #logging.info(i)
# #logging.info('negative number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=(float(bitValue)/((2**16-1)/2))*(self.Halfrange/1000) #multiply with 2V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# else:
# #logging.info(i)
# #logging.info('positive number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=-(self.Halfrange/1000)+(float(bitValue)/(65536.0/2))*(self.Halfrange/1000) #multiply with 10V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# #values[i] = int(((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)),2)-(1<<20)
# #values[i] = (( 20*((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)) )/ 1048575.0) + 10
# #logging.info('DAC: ')
# #logging.info(numbers[4 + 4*i] )
# #logging.info('Val: ')
# #logging.info(values[i])
# return values
# #return numbers
def _numbers_to_mvoltages(self, byte_mess):
'''
Converts a list of bytes to a list containing
the corresponding mvoltages
'''
values = list(range(self._numdacs))
for i in range(self._numdacs):
# combine two bytes into a 16-bit int, scale it to the full range
# and subtract the half-range offset set by the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
# Communication with device
def do_get_dac(self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages[channel - 1]
def do_set_dac(self, mvoltage, channel):
'''
Sets the specified dac to the specified voltage
Input:
mvoltage (float) : output voltage in mV
channel (int) : 1 based index of the dac
Output:
reply (string) : errormessage
'''
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
#logging.info('mvoltage after m_to_bytes: ')
#logging.info(mvoltage)
#logging.info('bin(channel: ')
#logging.info(bin(channel))
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)] #0xff is 255
channel = (int(channel)-1) | 0b10000000 #100 is a write operation
#message = "%c%c%c%c" % (channel,mvoltage_bytes[0], mvoltage_bytes[1], mvoltage_bytes[2])
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
#logging.info('bin(message: ')
#logging.info(bin(mvoltage_bytes[0]))
#logging.info(bin(mvoltage_bytes[1]))
#logging.info(bin(mvoltage_bytes[2]))
#logging.info('message: ')
#logging.info(message)
reply = self._send_and_read(message, self.communication_bytes)
#logging.info('bin(reply: ')
#logging.info(bin(reply[0]))
#logging.info(bin(reply[1]))
#logging.info(bin(reply[2]))
#logging.info(bin(reply[3]))
return reply
def do_set_dac_fast(self, mvoltage, channel): #added by Daniel, seems to work
if channel>4:
print('Error: Only channels 1-4 have fast setting.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b11000000 #110 is a write fast operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, 0)
return reply
def do_ramp_dac(self, mvoltage, channel): #added by Daniel, fucks it up completely right now...
|
def do_set_trigger(self):
'''
Sets the trigger; trigger is 1ms and around 4.2V
Input:
none
Output:
reply (string) : errormessage
'''
logging.debug('Trigger out')
message = "%c%c%c%c" % (4, 0, 2, 6)
reply = self._send_and_read(message.encode(), self.communication_bytes) # bytestoread argument was missing; assumes the standard reply length
return reply
def get_dacs(self):
mvoltages = self._get_dacs()
for i in range(self._numdacs):
print('dac{}: '.format(i+1)+str(mvoltages[i]))
return mvoltages
def _get_dacs(self):
'''
Reads from device and returns all dacvoltages in a list
Input:
None
Output:
voltages (float[]) : list containing all dacvoltages (in mV)
'''
logging.debug('Getting dac voltages from instrument')
# first 3 bits are control, last 5 the DAC number
message = '\x40' #0b01000000 = 010 = read all dacs
#logging.info(sys.getsizeof(message))
reply = self._send_and_read(message.encode(), self._numdacs*self.communication_bytes+4)
#logging.info(reply)
mvoltages = self._numbers_to_mvoltages(reply)
return mvoltages
def _send_and_read(self, message, bytestoread):
'''
Send <message> to the device and read answer.
Raises an error if one occurred
Returns a list of bytes
Input:
message (string) : string conforming to the IST_20 protocol
Output:
data_out_numbers (int[]) : return message
'''
logging.info('Sending %r', message)
# clear input buffer
self.ser.flushInput()
#logging.info('Flushed input')
#vpp43.write(self._vi, message) # OLD
self.ser.write(message) # NEW
#logging.info('Wrote Message')
# Instead of blocking, we could also poll, but it's a bit slower
# print visafunc.get_navail(self.lib, self._vi)
# if not visafunc.wait_data(self._vi, 2, 0.5):
# logging.error('Failed to receive reply from IST_20 rack')
# return False
#data1 = visafunc.readn(self._vi, 2) # OLD
#sleep(2)
#logging.info(self.ser.readline())
s=0
data1 = []
while s < bytestoread:
data1.append(ord(self.ser.read()))
#logging.info(s)
s=s+1
#data1 = [ord(s) for s in data1]
#data2 = np.reshape(data1,(-1,4))
#logging.info('finished reading')
#data2 = np.uint32(data1) #from string to 32bit
data2 = data1
#logging.info('converted to uint32')
#logging.info('sendAndRead: %s', data2)
return data2
def set_pol_dacrack(self, flag, channels, get_all=True):
'''
Changes the polarity of the specified set of dacs
Input:
flag (string) : 'BIP', 'POS' or 'NEG'
channel (int) : 0 based index of the rack
get_all (boolean): if True (default) perform a get_all
Output:
None
'''
flagmap = {'NEG': -self.Fullrange, 'BIP': -self.Halfrange, 'POS': 0}
if flag.upper() not in flagmap:
raise KeyError('Tried to set invalid dac polarity %s' % flag)
val = flagmap[flag.upper()]
for ch in channels:
self.pol_num[ch - 1] = val
# self.set_parameter_bounds('dac%d' % (i+1), val, val +
# self.Fullrange.0)
if get_all:
self.get_all()
def get_pol_dac(self, channel):
'''
Returns the polarity of the dac channel specified
Input:
channel (int) : 1 based index of the dac
Output:
polarity (string) : 'BIP', 'POS' or 'NEG'
'''
val = self.pol_num[channel - 1]
if (val == -self.Fullrange):
return 'NEG'
elif (val == -self.Halfrange):
return 'BIP'
elif (val == 0):
return 'POS'
else:
return 'Invalid polarity in memory'
def get_numdacs(self):
'''
Get the number of DACS.
'''
return self._numdacs
def _gen_ch_set_func(self, fun, ch):
def set_func(val):
return fun(val, ch)
return set_func
def _gen_ch_get_func(self, fun, ch):
def get_func():
return fun(ch)
return get_func
def get_all(self):
return self.snapshot(update=True) | if channel>2:
print('Error: Only channels 1-2 have ramping.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b10100000 #101 is a ramp operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, self.communication_bytes)
return reply | identifier_body |
DAC16bit.py | from qcodes.instrument.base import Instrument
import types
import logging
import numpy as np
import serial
import visa
import traceback
import threading
import time
from qcodes import VisaInstrument, validators as vals
from qcodes.instrument.parameter import ManualParameter
from qcodes.utils.validators import Bool, Numbers
Fullrange = 4000
Halfrange = Fullrange / 2
class IST_16(Instrument):
def __init__(self, name, interface = 'COM9', reset=False, numdacs=16, dac_step=10,dac_delay=.1, safe_version=True,
polarity=['BIP', 'BIP', 'BIP', 'BIP'],
use_locks=False,**kwargs):
t0 = time.time()
super().__init__(name, **kwargs)
self._interface = interface
self.Fullrange = Fullrange
self.Halfrange = Halfrange
self.communication_bytes = 3
if numdacs % 4 == 0 and numdacs > 0:
self._numdacs = int(numdacs)
else:
logging.error('Number of dacs needs to be multiple of 4')
# initialize pol_num, the voltage offset due to the polarity
self.pol_num = np.zeros(self._numdacs)
for i in range(int(self._numdacs / 4)):
self.set_pol_dacrack(polarity[i], np.arange(1 + i * 4, 1 + (i + 1) * 4),
get_all=False)
# Add functions
#self.add_function('get_all')
#self.add_function('set_dacs_zero')
#self.add_function('reinitialize_dacs')
for i in range(1, numdacs + 1):
self.add_parameter(
'dac{}'.format(i),
label='Dac {}'.format(i),
unit='mV',
get_cmd=self._gen_ch_get_func(self.do_get_dac, i),
set_cmd=self._gen_ch_set_func(self.do_set_dac, i),
vals=vals.Numbers(self.pol_num[i - 1]-1,
self.pol_num[i - 1] + self.Fullrange+1),
step=dac_step,
delay=dac_delay,
max_val_age=10)
self._open_serial_connection()
#open serial connection
def _open_serial_connection(self):
self.ser = serial.Serial()
self.ser.port = self._interface
self.ser.baudrate = 10000000
self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes
self.ser.parity = serial.PARITY_ODD #set parity check: odd parity
self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits
self.ser.timeout = 1 #non-block read
self.ser.xonxoff = False #disable software flow control
self.ser.rtscts = False #disable hardware (RTS/CTS) flow control
self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
try:
self.ser.open()
except:
logging.warning('Error opening serial port')
print ('error opening serial port')
self.ser.close()
self.ser.open()
raise Exception()
if not self.ser.isOpen():
logging.error('Serial port not open')
print ('serial port not open')
raise Exception()
logging.info('Serial port opened: ' + self.ser.portstr)
# close serial connection
def _close_serial_connection(self):
'''
Closes the serial connection
Input:
None
Output:
None
'''
logging.debug('Closing serial connection')
print ('closing serial connection')
# vpp43.close(self._vi) # OLD
self.ser.close()
def reset(self):
'''
Resets all dacs to 0 volts
Input:
None
Output:
None
'''
logging.info('Resetting instrument')
self.set_dacs_zero()
self.get_all()
def set_dacs_zero(self):
for i in range(self._numdacs):
self.do_set_dac(0,i+1)
def reinitialize_dacs(self):
bytetosend = 0b11100000 #111 is a re init all dacs
message = "%c" % (bytetosend)
reply = self._send_and_read(message.encode(), self.communication_bytes)
return reply
# Conversion of data
def _mvoltage_to_bytes(self, mvoltage):
'''
Converts a voltage in mV to its 16-bit integer equivalent
(the value is offset by Halfrange and scaled onto 0..65535)
Input:
mvoltage (float) : a voltage in mV within the configured output window
Output:
bytevalue (int) : 16-bit integer equivalent of the voltage
'''
#//+10V=01111111111111111111
#//-10V=10000000000000000000
# logging.info('mvoltstobytes, voltage:')
# logging.info(mvoltage)
# if(mvoltage>0):
# data = int(((float(mvoltage)/1000)+2)*((2**16-1)/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# #data = data | 0b10000000000000000000
# logging.info("negative, data:")
# #data = bin(data)
# logging.info(data)
#bytevalue = int(round(mvoltage/4000.0*65535))
#dataH = int(bytevalue/256)
#dataL = bytevalue - dataH*256
#return (dataH, dataL)
bytevalue = int(round((mvoltage+self.Halfrange) / self.Fullrange * 65535))
return bytevalue
# def _numbers_to_mvoltages(self, numbers):
# '''
# Converts a list of bytes to a list containing
# the corresponding mvoltages
# '''
# values = np.ones(self._numdacs) #initializes the values array to all ones
# #//calculate the bits to send to the dac out of the input values
# #//D= 20bit input code
# for i in range(self._numdacs):
# bitValue = ((numbers[5 + 3*i]<<8) + (numbers[6 + 3*i]<<0))
# if (bitValue & 0b1000000000000000): #check if the number is positive
# #logging.info(i)
# #logging.info('negative number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=(float(bitValue)/((2**16-1)/2))*(self.Halfrange/1000) #multiply with 2V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# else:
# #logging.info(i)
# #logging.info('positive number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=-(self.Halfrange/1000)+(float(bitValue)/(65536.0/2))*(self.Halfrange/1000) #multiply with 10V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# #values[i] = int(((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)),2)-(1<<20)
# #values[i] = (( 20*((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)) )/ 1048575.0) + 10
# #logging.info('DAC: ')
# #logging.info(numbers[4 + 4*i] )
# #logging.info('Val: ')
# #logging.info(values[i])
# return values
# #return numbers
def _numbers_to_mvoltages(self, byte_mess):
'''
Converts a list of bytes to a list containing
the corresponding mvoltages
'''
values = list(range(self._numdacs))
for i in range(self._numdacs):
# combine two bytes into a 16-bit int, scale it to the full range
# and subtract the half-range offset set by the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
# Communication with device
def do_get_dac(self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages[channel - 1]
def do_set_dac(self, mvoltage, channel):
'''
Sets the specified dac to the specified voltage
Input:
mvoltage (float) : output voltage in mV
channel (int) : 1 based index of the dac
Output:
reply (string) : errormessage
'''
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
#logging.info('mvoltage after m_to_bytes: ')
#logging.info(mvoltage)
#logging.info('bin(channel: ')
#logging.info(bin(channel))
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)] #0xff is 255
channel = (int(channel)-1) | 0b10000000 #100 is a write operation
#message = "%c%c%c%c" % (channel,mvoltage_bytes[0], mvoltage_bytes[1], mvoltage_bytes[2])
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
#logging.info('bin(message: ')
#logging.info(bin(mvoltage_bytes[0]))
#logging.info(bin(mvoltage_bytes[1]))
#logging.info(bin(mvoltage_bytes[2]))
#logging.info('message: ')
#logging.info(message)
reply = self._send_and_read(message, self.communication_bytes)
#logging.info('bin(reply: ')
#logging.info(bin(reply[0]))
#logging.info(bin(reply[1]))
#logging.info(bin(reply[2]))
#logging.info(bin(reply[3]))
return reply
def do_set_dac_fast(self, mvoltage, channel): #added by Daniel, seems to work
if channel>4:
print('Error: Only channels 1-4 have fast setting.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b11000000 #110 is a write fast operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, 0)
return reply
def do_ramp_dac(self, mvoltage, channel): #added by Daniel, fucks it up completely right now...
if channel>2:
print('Error: Only channels 1-2 have ramping.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b10100000 #101 is a ramp operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, self.communication_bytes)
return reply
def do_set_trigger(self):
'''
Sets the trigger; trigger is 1ms and around 4.2V
Input:
none
Output:
reply (string) : errormessage
'''
logging.debug('Trigger out')
message = "%c%c%c%c" % (4, 0, 2, 6)
reply = self._send_and_read(message.encode(), self.communication_bytes) # bytestoread argument was missing; assumes the standard reply length
return reply
def get_dacs(self):
mvoltages = self._get_dacs()
for i in range(self._numdacs):
print('dac{}: '.format(i+1)+str(mvoltages[i]))
return mvoltages
def _get_dacs(self):
'''
Reads from device and returns all dacvoltages in a list
Input:
None
Output:
voltages (float[]) : list containing all dacvoltages (in mV)
'''
logging.debug('Getting dac voltages from instrument')
# first 3 bits are control, last 5 the DAC number
message = '\x40' #0b01000000 = 010 = read all dacs
#logging.info(sys.getsizeof(message))
reply = self._send_and_read(message.encode(), self._numdacs*self.communication_bytes+4)
#logging.info(reply)
mvoltages = self._numbers_to_mvoltages(reply)
return mvoltages
def _send_and_read(self, message, bytestoread):
'''
Send <message> to the device and read answer.
Raises an error if one occurred
Returns a list of bytes
Input:
message (string) : string conforming to the IST_20 protocol
Output:
data_out_numbers (int[]) : return message
'''
logging.info('Sending %r', message)
# clear input buffer
self.ser.flushInput()
#logging.info('Flushed input')
#vpp43.write(self._vi, message) # OLD
self.ser.write(message) # NEW
#logging.info('Wrote Message')
# Instead of blocking, we could also poll, but it's a bit slower
# print visafunc.get_navail(self.lib, self._vi)
# if not visafunc.wait_data(self._vi, 2, 0.5):
# logging.error('Failed to receive reply from IST_20 rack')
# return False
#data1 = visafunc.readn(self._vi, 2) # OLD
#sleep(2)
#logging.info(self.ser.readline())
s=0
data1 = []
while s < bytestoread:
data1.append(ord(self.ser.read()))
#logging.info(s)
s=s+1
#data1 = [ord(s) for s in data1]
#data2 = np.reshape(data1,(-1,4))
#logging.info('finished reading')
#data2 = np.uint32(data1) #from string to 32bit
data2 = data1
#logging.info('converted to uint32')
#logging.info('sendAndRead: %s', data2)
return data2
def set_pol_dacrack(self, flag, channels, get_all=True):
'''
Changes the polarity of the specified set of dacs
Input:
flag (string) : 'BIP', 'POS' or 'NEG'
channel (int) : 0 based index of the rack
get_all (boolean): if True (default) perform a get_all
Output:
None
'''
flagmap = {'NEG': -self.Fullrange, 'BIP': -self.Halfrange, 'POS': 0}
if flag.upper() not in flagmap:
raise KeyError('Tried to set invalid dac polarity %s' % flag)
val = flagmap[flag.upper()]
for ch in channels:
self.pol_num[ch - 1] = val
# self.set_parameter_bounds('dac%d' % (i+1), val, val +
# self.Fullrange.0)
if get_all:
self.get_all()
def get_pol_dac(self, channel):
'''
Returns the polarity of the dac channel specified
Input:
channel (int) : 1 based index of the dac
Output:
polarity (string) : 'BIP', 'POS' or 'NEG'
'''
val = self.pol_num[channel - 1]
if (val == -self.Fullrange):
return 'NEG'
elif (val == -self.Halfrange):
return 'BIP'
elif (val == 0):
|
else:
return 'Invalid polarity in memory'
def get_numdacs(self):
'''
Get the number of DACS.
'''
return self._numdacs
def _gen_ch_set_func(self, fun, ch):
def set_func(val):
return fun(val, ch)
return set_func
def _gen_ch_get_func(self, fun, ch):
def get_func():
return fun(ch)
return get_func
def get_all(self):
return self.snapshot(update=True) | return 'POS' | conditional_block |
gm.go | package gm
import (
"fmt"
"github.com/lightpaw/logrus"
"github.com/lightpaw/male7/config"
"github.com/lightpaw/male7/config/kv"
"github.com/lightpaw/male7/config/regdata"
"github.com/lightpaw/male7/entity"
"github.com/lightpaw/male7/entity/npcid"
"github.com/lightpaw/male7/gen/iface"
"github.com/lightpaw/male7/gen/pb/gm"
"github.com/lightpaw/male7/pb/shared_proto"
"github.com/lightpaw/male7/service"
"github.com/lightpaw/male7/service/cluster"
"github.com/lightpaw/male7/service/conflict/heroservice/herolock"
"github.com/lightpaw/male7/service/heromodule"
"github.com/lightpaw/male7/service/monitor/metrics"
"github.com/lightpaw/male7/service/operate_type"
"github.com/lightpaw/male7/util/idbytes"
"github.com/lightpaw/male7/util/imath"
"github.com/lightpaw/male7/util/msg"
"github.com/lightpaw/male7/util/must"
"github.com/lightpaw/male7/util/u64"
"github.com/lightpaw/pbutil"
"runtime/debug"
"strconv"
"strings"
)
func NewGmModule(dep iface.ServiceDep, db iface.DbService, config *kv.IndividualServerConfig, datas *config.ConfigDatas,
modules iface.Modules, realmService iface.RealmService, reminderService iface.ReminderService, buffService iface.BuffService,
pushService iface.PushService, farmService iface.FarmService, mingcWarService iface.MingcWarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: db,
tick: tick,
config: config,
datas: datas,
modules: modules,
heroDataService: dep.HeroData(),
world: dep.World(),
reminderService: reminderService,
realmService: realmService,
heroSnapshotService: dep.HeroSnapshot(),
sharedGuildService: dep.Guild(),
pushService: pushService,
farmService: farmService,
mingcWarService: mingcWarService,
mingcService: mingcService,
clusterService: clusterService,
seasonService: seasonService,
buffService: buffService,
country: country,
gameExporter: gameExporter,
}
if m.config.IsDebug {
if m.config.IsDebugYuanbao {
m.groups = []*gm_group{
{
tab: "常用",
handler: []*gm_handler{
newCmdIntHandler("加元宝(负数表示减)_10", "加元宝(负数表示减)", "100000", func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
m.addYuanbao(amount, hero, result, hc)
result.Changed()
return
})
}),
},
},
}
} else {
m.groups = []*gm_group{
m.newCommonGmGroup(),
m.newDomesticGmGroup(),
m.newGoodsGmGroup(),
m.newSingleGoodsGmGroup(),
m.newSingleEquipmentGmGroup(),
m.newSingleGemGmGroup(),
m.newResetGmGroup(),
m.newTaskGmGroup(),
m.newDungeonGmGroup(),
m.newMailGmGroup(),
m.newLevelGmGroup(),
m.newSceneGmGroup(),
m.newZhanJiangGmGroup(),
m.newMiscGmGroup(),
m.newPrintEquipsGmGroup(),
m.newMingcWarGmGroup(),
m.newMingcGmGroup(),
m.newRedPacketGmGroup(),
m.newCountryGmGroup(),
}
}
}
// handle module messages
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
var cmdMap = map[string]struct{}{}
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
// serious error: the hero thread must not panic here
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: " + proto.Cmd))
}
//gogen:iface
func (m *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.BaseY()
return false
})
mapData := m.realmService.GetBigMap().GetMapData()
ux, uy := mapData.GetBlockByPos(heroBaseX, heroBaseY)
startX := ux * mapData.BlockData().XLen
startY := uy * mapData.BlockData().YLen
sequence := regdata.BlockSequence(ux, uy)
var data *regdata.RegionMultiLevelNpcData
for _, data = range m.datas.GetRegionMultiLevelNpcDataArray() {
if int32(data.TypeData.Type) == proto.NpcType {
break
}
}
id := npcid.GetNpcId(sequence, data.Id, npcid.NpcType_MultiLevelMonster)
baseX := startX + data.OffsetBaseX
baseY := startY + data.OffsetBaseY
hc.Send(gm.NewS2cInvaseTargetIdMsg(idbytes.ToBytes(id), u64.Int32(baseX), u64.Int32(baseY)))
}
type hero_near_slice struct {
baseX, baseY int
a []*entity.Hero
}
func (a *hero_near_slice) score(hero *entity.Hero) int {
return imath.Abs(hero.BaseX()-a.baseX) + imath.Abs(hero.BaseY()-a.baseY)
}
func (a *hero_near_slice) Len() int { return len(a.a) }
func (a *hero_near_slice) Swap(i, j int) { a.a[i], a.a[j] = a.a[j], a.a[i] }
func (a *hero_near_slice) Less(i, j int) bool { return a.score(a.a[i]) < a.score(a.a[j]) }
func (m *GmModule) getOrCreateFakeHeroControler(id int64) iface.HeroController {
sender := m.world.GetUserCloseSender(id)
if sender != nil {
u, ok := sender.(iface.ConnectedUser)
if ok {
return u.GetHeroController()
}
} else {
sender = fakeSender
}
return service.NewHeroController(id, sender, "127.0.0.1", 0x100007f, 0, m.heroDataService.NewHeroLocker(id))
}
var fakeSender = &fake_sender{}
type fake_sender struct{}
func (m *fake_sender) Id() int64 { return 0 }
func (m *fake_sender) SendAll(msgs []pbutil.Buffer) {}
func (m *fake_sender) Send(msg pbutil.Buffer) {}
func (m *fake_sender) SendIfFree(msg pbutil.Buffer) {}
func (m *fake_sender) Disconnect(err msg.ErrMsg) {}
func (m *fake | ender) DisconnectAndWait(err msg.ErrMsg) {}
func (m *fake_sender) IsClosed() bool { return false }
//func (module *GmModule) processGoodsCmd(args []string, hc iface.HeroController) bool {
//
// module.goodsCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) goodsCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.GoodsData().Array {
// newCount := hero.Depot().AddGoods(data.Id, 100)
// result.Add(depot.NewS2cUpdateGoodsMsg(u64.Int32(data.Id), u64.Int32(newCount)))
// }
//}
//
//func (module *GmModule) processEquipmentCmd(args []string, hc iface.HeroController) bool {
//
// module.equipmentCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) equipmentCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.EquipmentData().Array {
// e := entity.NewEquipment(hero.Depot().NewEquipmentId(), data)
// hero.Depot().AddEquipment(e)
// result.Add(equipment.NewS2cAddEquipmentMsg(must.Marshal(e.EncodeClient())))
// }
//}
| _s | identifier_body |
gm.go | package gm
import (
"fmt"
"github.com/lightpaw/logrus"
"github.com/lightpaw/male7/config"
"github.com/lightpaw/male7/config/kv"
"github.com/lightpaw/male7/config/regdata"
"github.com/lightpaw/male7/entity"
"github.com/lightpaw/male7/entity/npcid"
"github.com/lightpaw/male7/gen/iface"
"github.com/lightpaw/male7/gen/pb/gm"
"github.com/lightpaw/male7/pb/shared_proto"
"github.com/lightpaw/male7/service"
"github.com/lightpaw/male7/service/cluster"
"github.com/lightpaw/male7/service/conflict/heroservice/herolock"
"github.com/lightpaw/male7/service/heromodule"
"github.com/lightpaw/male7/service/monitor/metrics"
"github.com/lightpaw/male7/service/operate_type"
"github.com/lightpaw/male7/util/idbytes"
"github.com/lightpaw/male7/util/imath"
"github.com/lightpaw/male7/util/msg"
"github.com/lightpaw/male7/util/must"
"github.com/lightpaw/male7/util/u64"
"github.com/lightpaw/pbutil"
"runtime/debug"
"strconv"
"strings"
)
func NewGmModule(dep iface.ServiceDep, db iface.DbService, config *kv.IndividualServerConfig, datas *config.ConfigDatas,
modules iface.Modules, realmService iface.RealmService, reminderService iface.ReminderService, buffService iface.BuffService,
pushService iface.PushService, farmService iface.FarmService, mingcWarService iface.MingcWarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: db,
tick: tick,
config: config,
datas: datas,
modules: modules,
heroDataService: dep.HeroData(),
world: dep.World(),
reminderService: reminderService,
realmService: realmService,
heroSnapshotService: dep.HeroSnapshot(),
sharedGuildService: dep.Guild(),
pushService: pushService,
farmService: farmService,
mingcWarService: mingcWarService,
mingcService: mingcService,
clusterService: clusterService, |
if m.config.IsDebug {
if m.config.IsDebugYuanbao {
m.groups = []*gm_group{
{
tab: "常用",
handler: []*gm_handler{
newCmdIntHandler("加元宝(负数表示减)_10", "加元宝(负数表示减)", "100000", func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
m.addYuanbao(amount, hero, result, hc)
result.Changed()
return
})
}),
},
},
}
} else {
m.groups = []*gm_group{
m.newCommonGmGroup(),
m.newDomesticGmGroup(),
m.newGoodsGmGroup(),
m.newSingleGoodsGmGroup(),
m.newSingleEquipmentGmGroup(),
m.newSingleGemGmGroup(),
m.newResetGmGroup(),
m.newTaskGmGroup(),
m.newDungeonGmGroup(),
m.newMailGmGroup(),
m.newLevelGmGroup(),
m.newSceneGmGroup(),
m.newZhanJiangGmGroup(),
m.newMiscGmGroup(),
m.newPrintEquipsGmGroup(),
m.newMingcWarGmGroup(),
m.newMingcGmGroup(),
m.newRedPacketGmGroup(),
m.newCountryGmGroup(),
}
}
}
// handle module messages
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
var cmdMap = map[string]struct{}{}
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
// Serious error: the hero thread must not panic here
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: " + proto.Cmd))
}
//gogen:iface
func (m *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.BaseY()
return false
})
mapData := m.realmService.GetBigMap().GetMapData()
ux, uy := mapData.GetBlockByPos(heroBaseX, heroBaseY)
startX := ux * mapData.BlockData().XLen
startY := uy * mapData.BlockData().YLen
sequence := regdata.BlockSequence(ux, uy)
var data *regdata.RegionMultiLevelNpcData
for _, data = range m.datas.GetRegionMultiLevelNpcDataArray() {
if int32(data.TypeData.Type) == proto.NpcType {
break
}
}
id := npcid.GetNpcId(sequence, data.Id, npcid.NpcType_MultiLevelMonster)
baseX := startX + data.OffsetBaseX
baseY := startY + data.OffsetBaseY
hc.Send(gm.NewS2cInvaseTargetIdMsg(idbytes.ToBytes(id), u64.Int32(baseX), u64.Int32(baseY)))
}
type hero_near_slice struct {
baseX, baseY int
a []*entity.Hero
}
func (a *hero_near_slice) score(hero *entity.Hero) int {
return imath.Abs(hero.BaseX()-a.baseX) + imath.Abs(hero.BaseY()-a.baseY)
}
func (a *hero_near_slice) Len() int { return len(a.a) }
func (a *hero_near_slice) Swap(i, j int) { a.a[i], a.a[j] = a.a[j], a.a[i] }
func (a *hero_near_slice) Less(i, j int) bool { return a.score(a.a[i]) < a.score(a.a[j]) }
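// getOrCreateFakeHeroControler returns the live controller of a connected hero; otherwise it builds one, falling back to the no-op fakeSender when no close sender exists, so GM commands can run against offline heroes.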
func (m *GmModule) getOrCreateFakeHeroControler(id int64) iface.HeroController {
sender := m.world.GetUserCloseSender(id)
if sender != nil {
u, ok := sender.(iface.ConnectedUser)
if ok {
return u.GetHeroController()
}
} else {
sender = fakeSender
}
return service.NewHeroController(id, sender, "127.0.0.1", 0x100007f, 0, m.heroDataService.NewHeroLocker(id))
}
var fakeSender = &fake_sender{}
type fake_sender struct{}
func (m *fake_sender) Id() int64 { return 0 }
func (m *fake_sender) SendAll(msgs []pbutil.Buffer) {}
func (m *fake_sender) Send(msg pbutil.Buffer) {}
func (m *fake_sender) SendIfFree(msg pbutil.Buffer) {}
func (m *fake_sender) Disconnect(err msg.ErrMsg) {}
func (m *fake_sender) DisconnectAndWait(err msg.ErrMsg) {}
func (m *fake_sender) IsClosed() bool { return false }
//func (module *GmModule) processGoodsCmd(args []string, hc iface.HeroController) bool {
//
// module.goodsCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) goodsCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.GoodsData().Array {
// newCount := hero.Depot().AddGoods(data.Id, 100)
// result.Add(depot.NewS2cUpdateGoodsMsg(u64.Int32(data.Id), u64.Int32(newCount)))
// }
//}
//
//func (module *GmModule) processEquipmentCmd(args []string, hc iface.HeroController) bool {
//
// module.equipmentCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) equipmentCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.EquipmentData().Array {
// e := entity.NewEquipment(hero.Depot().NewEquipmentId(), data)
// hero.Depot().AddEquipment(e)
// result.Add(equipment.NewS2cAddEquipmentMsg(must.Marshal(e.EncodeClient())))
// }
//} | seasonService: seasonService,
buffService: buffService,
country: country,
gameExporter: gameExporter,
} | random_line_split |
gm.go | package gm
import (
"fmt"
"github.com/lightpaw/logrus"
"github.com/lightpaw/male7/config"
"github.com/lightpaw/male7/config/kv"
"github.com/lightpaw/male7/config/regdata"
"github.com/lightpaw/male7/entity"
"github.com/lightpaw/male7/entity/npcid"
"github.com/lightpaw/male7/gen/iface"
"github.com/lightpaw/male7/gen/pb/gm"
"github.com/lightpaw/male7/pb/shared_proto"
"github.com/lightpaw/male7/service"
"github.com/lightpaw/male7/service/cluster"
"github.com/lightpaw/male7/service/conflict/heroservice/herolock"
"github.com/lightpaw/male7/service/heromodule"
"github.com/lightpaw/male7/service/monitor/metrics"
"github.com/lightpaw/male7/service/operate_type"
"github.com/lightpaw/male7/util/idbytes"
"github.com/lightpaw/male7/util/imath"
"github.com/lightpaw/male7/util/msg"
"github.com/lightpaw/male7/util/must"
"github.com/lightpaw/male7/util/u64"
"github.com/lightpaw/pbutil"
"runtime/debug"
"strconv"
"strings"
)
func NewGmModule(dep iface.ServiceDep, db iface.DbService, config *kv.IndividualServerConfig, datas *config.ConfigDatas,
modules iface.Modules, realmService iface.RealmService, reminderService iface.ReminderService, buffService iface.BuffService,
pushService iface.PushService, farmService iface.FarmService, mingcWarService iface.MingcWarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: db,
tick: tick,
config: config,
datas: datas,
modules: modules,
heroDataService: dep.HeroData(),
world: dep.World(),
reminderService: reminderService,
realmService: realmService,
heroSnapshotService: dep.HeroSnapshot(),
sharedGuildService: dep.Guild(),
pushService: pushService,
farmService: farmService,
mingcWarService: mingcWarService,
mingcService: mingcService,
clusterService: clusterService,
seasonService: seasonService,
buffService: buffService,
country: country,
gameExporter: gameExporter,
}
if m.config.IsDebug {
if m.config.IsDebugYuanbao {
m.groups = []*gm_group{
{
tab: "常用",
handler: []*gm_handler{
newCmdIntHandler("加元宝(负数表示减)_10", "加元宝(负数表示减)", "100000", func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
m.addYuanbao(amount, hero, result, hc)
result.Changed()
return
})
}),
},
},
}
} else {
m.groups = []*gm_group{
m.newCommonGmGroup(),
m.newDomesticGmGroup(),
m.newGoodsGmGroup(),
m.newSingleGoodsGmGroup(),
m.newSingleEquipmentGmGroup(),
m.newSingleGemGmGroup(),
m.newResetGmGroup(),
m.newTaskGmGroup(),
m.newDungeonGmGroup(),
m.newMailGmGroup(),
m.newLevelGmGroup(),
m.newSceneGmGroup(),
m.newZhanJiangGmGroup(),
m.newMiscGmGroup(),
m.newPrintEquipsGmGroup(),
m.newMingcWarGmGroup(),
m.newMingcGmGroup(),
m.newRedPacketGmGroup(),
m.newCountryGmGroup(),
}
}
}
// Handle module messages (collect GM handler groups contributed by modules)
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
var cmdMap = map[string]struct{}{}
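// newCmd returns a unique command key for desc: if the name is already registered, a numeric suffix (_0.._999) is tried until a free one is found.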
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
// Serious error: the hero thread must not panic here
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: | *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.BaseY()
return false
})
mapData := m.realmService.GetBigMap().GetMapData()
ux, uy := mapData.GetBlockByPos(heroBaseX, heroBaseY)
startX := ux * mapData.BlockData().XLen
startY := uy * mapData.BlockData().YLen
sequence := regdata.BlockSequence(ux, uy)
var data *regdata.RegionMultiLevelNpcData
for _, data = range m.datas.GetRegionMultiLevelNpcDataArray() {
if int32(data.TypeData.Type) == proto.NpcType {
break
}
}
id := npcid.GetNpcId(sequence, data.Id, npcid.NpcType_MultiLevelMonster)
baseX := startX + data.OffsetBaseX
baseY := startY + data.OffsetBaseY
hc.Send(gm.NewS2cInvaseTargetIdMsg(idbytes.ToBytes(id), u64.Int32(baseX), u64.Int32(baseY)))
}
type hero_near_slice struct {
baseX, baseY int
a []*entity.Hero
}
func (a *hero_near_slice) score(hero *entity.Hero) int {
return imath.Abs(hero.BaseX()-a.baseX) + imath.Abs(hero.BaseY()-a.baseY)
}
func (a *hero_near_slice) Len() int { return len(a.a) }
func (a *hero_near_slice) Swap(i, j int) { a.a[i], a.a[j] = a.a[j], a.a[i] }
func (a *hero_near_slice) Less(i, j int) bool { return a.score(a.a[i]) < a.score(a.a[j]) }
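// getOrCreateFakeHeroControler returns the live controller of a connected hero; otherwise it builds one, falling back to the no-op fakeSender when no close sender exists, so GM commands can run against offline heroes.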
func (m *GmModule) getOrCreateFakeHeroControler(id int64) iface.HeroController {
sender := m.world.GetUserCloseSender(id)
if sender != nil {
u, ok := sender.(iface.ConnectedUser)
if ok {
return u.GetHeroController()
}
} else {
sender = fakeSender
}
return service.NewHeroController(id, sender, "127.0.0.1", 0x100007f, 0, m.heroDataService.NewHeroLocker(id))
}
var fakeSender = &fake_sender{}
type fake_sender struct{}
func (m *fake_sender) Id() int64 { return 0 }
func (m *fake_sender) SendAll(msgs []pbutil.Buffer) {}
func (m *fake_sender) Send(msg pbutil.Buffer) {}
func (m *fake_sender) SendIfFree(msg pbutil.Buffer) {}
func (m *fake_sender) Disconnect(err msg.ErrMsg) {}
func (m *fake_sender) DisconnectAndWait(err msg.ErrMsg) {}
func (m *fake_sender) IsClosed() bool { return false }
//func (module *GmModule) processGoodsCmd(args []string, hc iface.HeroController) bool {
//
// module.goodsCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) goodsCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.GoodsData().Array {
// newCount := hero.Depot().AddGoods(data.Id, 100)
// result.Add(depot.NewS2cUpdateGoodsMsg(u64.Int32(data.Id), u64.Int32(newCount)))
// }
//}
//
//func (module *GmModule) processEquipmentCmd(args []string, hc iface.HeroController) bool {
//
// module.equipmentCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) equipmentCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.EquipmentData().Array {
// e := entity.NewEquipment(hero.Depot().NewEquipmentId(), data)
// hero.Depot().AddEquipment(e)
// result.Add(equipment.NewS2cAddEquipmentMsg(must.Marshal(e.EncodeClient())))
// }
//}
| " + proto.Cmd))
}
//gogen:iface
func (m | conditional_block |
gm.go | package gm
import (
"fmt"
"github.com/lightpaw/logrus"
"github.com/lightpaw/male7/config"
"github.com/lightpaw/male7/config/kv"
"github.com/lightpaw/male7/config/regdata"
"github.com/lightpaw/male7/entity"
"github.com/lightpaw/male7/entity/npcid"
"github.com/lightpaw/male7/gen/iface"
"github.com/lightpaw/male7/gen/pb/gm"
"github.com/lightpaw/male7/pb/shared_proto"
"github.com/lightpaw/male7/service"
"github.com/lightpaw/male7/service/cluster"
"github.com/lightpaw/male7/service/conflict/heroservice/herolock"
"github.com/lightpaw/male7/service/heromodule"
"github.com/lightpaw/male7/service/monitor/metrics"
"github.com/lightpaw/male7/service/operate_type"
"github.com/lightpaw/male7/util/idbytes"
"github.com/lightpaw/male7/util/imath"
"github.com/lightpaw/male7/util/msg"
"github.com/lightpaw/male7/util/must"
"github.com/lightpaw/male7/util/u64"
"github.com/lightpaw/pbutil"
"runtime/debug"
"strconv"
"strings"
)
func NewGmModule(dep iface.ServiceDep, db iface.DbService, config *kv.IndividualServerConfig, datas *config.ConfigDatas,
modules iface.Modules, realmService iface.RealmService, reminderService iface.ReminderService, buffService iface.BuffService,
pushService iface.PushService, farmService iface.FarmService, mingcWarService iface.MingcWarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: db,
tick: tick,
config: config,
datas: datas,
modules: modules,
heroDataService: dep.HeroData(),
world: dep.World(),
reminderService: reminderService,
realmService: realmService,
heroSnapshotService: dep.HeroSnapshot(),
sharedGuildService: dep.Guild(),
pushService: pushService,
farmService: farmService,
mingcWarService: mingcWarService,
mingcService: mingcService,
clusterService: clusterService,
seasonService: seasonService,
buffService: buffService,
country: country,
gameExporter: gameExporter,
}
if m.config.IsDebug {
if m.config.IsDebugYuanbao {
m.groups = []*gm_group{
{
tab: "常用",
handler: []*gm_handler{
newCmdIntHandler("加元宝(负数表示减)_10", "加元宝(负数表示减)", "100000", func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
m.addYuanbao(amount, hero, result, hc)
result.Changed()
return
})
}),
},
},
}
} else {
m.groups = []*gm_group{
m.newCommonGmGroup(),
m.newDomesticGmGroup(),
m.newGoodsGmGroup(),
m.newSingleGoodsGmGroup(),
m.newSingleEquipmentGmGroup(),
m.newSingleGemGmGroup(),
m.newResetGmGroup(),
m.newTaskGmGroup(),
m.newDungeonGmGroup(),
m.newMailGmGroup(),
m.newLevelGmGroup(),
m.newSceneGmGroup(),
m.newZhanJiangGmGroup(),
m.newMiscGmGroup(),
m.newPrintEquipsGmGroup(),
m.newMingcWarGmGroup(),
m.newMingcGmGroup(),
m.newRedPacketGmGroup(),
m.newCountryGmGroup(),
}
}
}
// Handle module messages (collect GM handler groups contributed by modules)
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
var cmdMap = map[string]struct{}{}
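// newCmd returns a unique command key for desc: if the name is already registered, a numeric suffix (_0.._999) is tried until a free one is found.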
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
// Serious error: the hero thread must not panic here
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: " + proto.Cmd))
}
//gogen:iface
func (m *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.BaseY()
return false
})
mapData := m.realmService.GetBigMap().GetMapData()
ux, uy := mapData.GetBlockByPos(heroBaseX, heroBaseY)
startX := ux * mapData.BlockData().XLen
startY := uy * mapData.BlockData().YLen
sequence := regdata.BlockSequence(ux, uy)
var data *regdata.RegionMultiLevelNpcData
for _, data = range m.datas.GetRegionMultiLevelNpcDataArray() {
if int32(data.TypeData.Type) == proto.NpcType {
break
}
}
id := npcid.GetNpcId(sequence, data.Id, npcid.NpcType_MultiLevelMonster)
baseX := startX + data.OffsetBaseX
baseY := startY + data.OffsetBaseY
hc.Send(gm.NewS2cInvaseTargetIdMsg(idbytes.ToBytes(id), u64.Int32(baseX), u64.Int32(baseY)))
}
type hero_near_slice struct {
baseX, baseY int
a []*entity.Hero
}
func (a *hero_near_slice) score(hero *entity.Hero) int {
return imath.Abs(hero.BaseX()-a.baseX) + imath.Abs(hero.BaseY()-a.baseY)
}
func (a *hero_near_slice) Len() int { return len(a.a) }
func (a *hero_near_slice) Swap(i, j int) { a.a[i], a.a[j] = a.a[j], a.a[i] }
func (a *hero_near_slice) Less(i, j int) bool { return a.score(a.a[i]) < a.score(a.a[j]) }
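// getOrCreateFakeHeroControler returns the live controller of a connected hero; otherwise it builds one, falling back to the no-op fakeSender when no close sender exists, so GM commands can run against offline heroes.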
func (m *GmModule) getOrCreateFakeHeroControler(id int64) iface.HeroController {
sender := m.world.GetUserCloseSender(id)
if sender != nil {
u, ok := sender.(iface.ConnectedUser)
if ok {
return u.GetHeroController()
}
} else {
sender = fakeSender
}
return service.NewHeroController(id, sender, "127.0.0.1", 0x100007f, 0, m.heroDataService.NewHeroLocker(id))
}
var fakeSender = &fake_sender{}
type fake_sender struct{}
func (m *fake_sender) Id() int64 { return 0 }
func (m *fake_sender) SendAll(msgs []pbutil.Buffer) {}
func (m *fake_sender) Send(msg pbutil.Buffer) {}
func (m *fake_sender) SendIfFree(msg pbutil.Buffer) {}
func (m *fake_sender) Disconnect(err msg.ErrMsg) {}
func (m *fake_sender) DisconnectAndWait(err msg.ErrMsg) {}
func (m *fake_sender) IsClosed() bool { return false }
//func (module *GmModul | d(args []string, hc iface.HeroController) bool {
//
// module.goodsCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) goodsCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.GoodsData().Array {
// newCount := hero.Depot().AddGoods(data.Id, 100)
// result.Add(depot.NewS2cUpdateGoodsMsg(u64.Int32(data.Id), u64.Int32(newCount)))
// }
//}
//
//func (module *GmModule) processEquipmentCmd(args []string, hc iface.HeroController) bool {
//
// module.equipmentCmd("", hc)
//
// return true
//}
//
//func (module *GmModule) equipmentCmd(args string, hc iface.HeroController) {
// for _, data := range module.datas.EquipmentData().Array {
// e := entity.NewEquipment(hero.Depot().NewEquipmentId(), data)
// hero.Depot().AddEquipment(e)
// result.Add(equipment.NewS2cAddEquipmentMsg(must.Marshal(e.EncodeClient())))
// }
//}
| e) processGoodsCm | identifier_name |
prune_head_with_taylor.py | "Pruning attention heads with Taylor expansion methods"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import torch.nn.functional as F
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.model_prune_head import BertForSequenceClassification, BertForPreTrainingLossMask
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from examples.classifier_processer import InputExample, InputFeatures, DataProcessor, MrpcProcessor, MnliProcessor, RteProcessor, ScitailProcessor, ColaProcessor, SstProcessor, QqpProcessor, QnliProcessor, WnliProcessor, StsProcessor
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"rte": RteProcessor,
"sst-2": SstProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"wnli": WnliProcessor,
"sts-b": StsProcessor,
"scitail": ScitailProcessor,
}
num_labels_task = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"rte": 2,
"sst-2": 2,
"qqp": 2,
"qnli": 2,
"wnli": 2,
"sts-b": 1,
"scitail": 2,
}
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
|
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
# Load a fine-tuned model
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(
args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare the data
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)[0:args.num_examples]
model.eval()
if args.bert_model.find("base") != -1:
num_head, num_layer = 12, 12
elif args.bert_model.find("large") != -1:
num_head, num_layer = 16, 24
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
imp_head_count = [[0]*num_head for i in range(num_layer)]
prune_head_count = [[0]*num_head for i in range(num_layer)]
all_logits, all_label_ids = [], []
head_importance = torch.zeros(num_layer, num_head).to(device)
# evaluate the model
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
all_baseline_ids = torch.tensor(
[f.baseline_ids for f in eval_features], dtype=torch.long)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_baseline_ids, all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
index_count = 0
tot_tokens = 0
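# Head-importance pass (first-order Taylor estimate): for every batch, run forward/backward,
# then score each head by |context_output * grad(context_output)|, summed over tokens and
# examples - an estimate of how much the loss would change if that head's output were zeroed.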
for baseline_ids, input_ids, input_mask, segment_ids, label_ids, in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
input_len = int(input_mask[0].sum())
att, loss = model(input_ids, "att", segment_ids, input_mask, label_ids, 0)
loss = loss.sum()
loss.backward()
for layer in range(num_layer):
self_att = model.bert.encoder.layer[layer].attention.self
ctx = self_att.context_layer_val
grad_ctx = ctx.grad
# Per-token dot product of each head's context output with its gradient
dot = torch.einsum("bhli,bhli->bhl", [grad_ctx, ctx])
head_importance[layer] += dot.abs().sum(-1).sum(0).detach()
tot_tokens += input_len
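# Average the accumulated scores: every layer but the last by the total token count, the last layer by the number of examples.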
head_importance[:-1] /= tot_tokens
head_importance[-1] /= args.num_examples
# Normalize scores within each layer (L2 norm over heads) so importance is comparable across layers
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
with open(os.path.join(args.output_dir, "head_importance_taylor.json"), "w") as f_out:
f_out.write(json.dumps(head_importance.tolist(), indent=2, sort_keys=True))
f_out.write('\n')
head_importance = head_importance.tolist()
final_importance = []
for i in range(num_layer):
final_importance += head_importance[i]
head_importance = final_importance
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)
prune_result = []
for prune_i in range(0, 11, 1):
prune_rate = prune_i / 10
important_index = np.argsort(np.array(head_importance))
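# Heads are sorted by ascending importance; the prune_rate fraction with the lowest scores is masked out (0.0), the rest keep 1.0.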
importance_set = [[1.0]*num_head for i in range(0, num_layer)]
for i in range(0, min(int(prune_rate*num_layer*num_head),num_layer*num_head), 1):
importance_set[important_index[i]//num_head][important_index[i]%num_head] = 0.0
importance_set = torch.tensor(importance_set)
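# Broadcast the per-head 0/1 mask to [num_layer, num_head, seq_len, seq_len] so it can be passed to the model as att_head_mask.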
importance_set = importance_set.view(*importance_set.shape, 1, 1)
importance_set = importance_set.expand(-1, -1, args.max_seq_length, args.max_seq_length).to(device)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
seg_result_dict = {}
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation: %s *****", eval_segment)
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, "res", segment_ids, input_mask, label_ids, att_head_mask=importance_set)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
all_logits.append(logits)
all_label_ids.append(label_ids)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
# compute evaluation metric
all_logits = np.concatenate(all_logits, axis=0)
all_label_ids = np.concatenate(all_label_ids, axis=0)
metric_func = processor.get_metric_func()
eval_result = metric_func(all_logits, all_label_ids)
prune_result.append(eval_result)
result = {'prune_rate': prune_rate,
'eval_loss': eval_loss,
'eval_result': eval_result,
'task_name': args.task_name,
'eval_segment': eval_segment}
if eval_segment not in seg_result_dict:
seg_result_dict[eval_segment] = []
seg_result_dict[eval_segment].append(result)
# logging the results
logger.info(
"***** Eval results ({0}) *****".format(eval_segment))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
with open(os.path.join(args.output_dir, "prune_result_taylor.json"), "w") as f_out:
f_out.write(json.dumps(prune_result, indent=2, sort_keys=True))
f_out.write('\n')
if __name__ == "__main__":
main() | torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl') | conditional_block |
prune_head_with_taylor.py | "Pruning attention heads with Taylor expansion methods"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import torch.nn.functional as F
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.model_prune_head import BertForSequenceClassification, BertForPreTrainingLossMask
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from examples.classifier_processer import InputExample, InputFeatures, DataProcessor, MrpcProcessor, MnliProcessor, RteProcessor, ScitailProcessor, ColaProcessor, SstProcessor, QqpProcessor, QnliProcessor, WnliProcessor, StsProcessor
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"rte": RteProcessor,
"sst-2": SstProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"wnli": WnliProcessor,
"sts-b": StsProcessor,
"scitail": ScitailProcessor,
}
num_labels_task = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"rte": 2,
"sst-2": 2,
"qqp": 2,
"qnli": 2,
"wnli": 2,
"sts-b": 1,
"scitail": 2,
}
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
|
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
# Load a fine-tuned model
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(
args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare the data
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)[0:args.num_examples]
model.eval()
if args.bert_model.find("base") != -1:
num_head, num_layer = 12, 12
elif args.bert_model.find("large") != -1:
num_head, num_layer = 16, 24
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
imp_head_count = [[0]*num_head for i in range(num_layer)]
prune_head_count = [[0]*num_head for i in range(num_layer)]
all_logits, all_label_ids = [], []
head_importance = torch.zeros(num_layer, num_head).to(device)
# evaluate the model
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
all_baseline_ids = torch.tensor(
[f.baseline_ids for f in eval_features], dtype=torch.long)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_baseline_ids, all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
index_count = 0
tot_tokens = 0
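# Head-importance pass (first-order Taylor estimate): for every batch, run forward/backward,
# then score each head by |context_output * grad(context_output)|, summed over tokens and
# examples - an estimate of how much the loss would change if that head's output were zeroed.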
for baseline_ids, input_ids, input_mask, segment_ids, label_ids, in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
input_len = int(input_mask[0].sum())
att, loss = model(input_ids, "att", segment_ids, input_mask, label_ids, 0)
loss = loss.sum()
loss.backward()
for layer in range(num_layer):
self_att = model.bert.encoder.layer[layer].attention.self
ctx = self_att.context_layer_val
grad_ctx = ctx.grad
# Per-token dot product of each head's context output with its gradient
dot = torch.einsum("bhli,bhli->bhl", [grad_ctx, ctx])
head_importance[layer] += dot.abs().sum(-1).sum(0).detach()
tot_tokens += input_len
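# Average the accumulated scores: every layer but the last by the total token count, the last layer by the number of examples.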
head_importance[:-1] /= tot_tokens
head_importance[-1] /= args.num_examples
# Normalize scores within each layer (L2 norm over heads) so importance is comparable across layers
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
with open(os.path.join(args.output_dir, "head_importance_taylor.json"), "w") as f_out:
f_out.write(json.dumps(head_importance.tolist(), indent=2, sort_keys=True))
f_out.write('\n')
head_importance = head_importance.tolist()
final_importance = []
for i in range(num_layer):
final_importance += head_importance[i]
head_importance = final_importance
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)
prune_result = []
for prune_i in range(0, 11, 1):
prune_rate = prune_i / 10
important_index = np.argsort(np.array(head_importance))
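# Heads are sorted by ascending importance; the prune_rate fraction with the lowest scores is masked out (0.0), the rest keep 1.0.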
importance_set = [[1.0]*num_head for i in range(0, num_layer)]
for i in range(0, min(int(prune_rate*num_layer*num_head),num_layer*num_head), 1):
importance_set[important_index[i]//num_head][important_index[i]%num_head] = 0.0
importance_set = torch.tensor(importance_set)
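# Broadcast the per-head 0/1 mask to [num_layer, num_head, seq_len, seq_len] so it can be passed to the model as att_head_mask.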
importance_set = importance_set.view(*importance_set.shape, 1, 1)
importance_set = importance_set.expand(-1, -1, args.max_seq_length, args.max_seq_length).to(device)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
seg_result_dict = {}
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation: %s *****", eval_segment)
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, "res", segment_ids, input_mask, label_ids, att_head_mask=importance_set)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
all_logits.append(logits)
all_label_ids.append(label_ids)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
# compute evaluation metric
all_logits = np.concatenate(all_logits, axis=0)
all_label_ids = np.concatenate(all_label_ids, axis=0)
metric_func = processor.get_metric_func()
eval_result = metric_func(all_logits, all_label_ids)
prune_result.append(eval_result)
result = {'prune_rate': prune_rate,
'eval_loss': eval_loss,
'eval_result': eval_result,
'task_name': args.task_name,
'eval_segment': eval_segment}
if eval_segment not in seg_result_dict:
seg_result_dict[eval_segment] = []
seg_result_dict[eval_segment].append(result)
# logging the results
logger.info(
"***** Eval results ({0}) *****".format(eval_segment))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
with open(os.path.join(args.output_dir, "prune_result_taylor.json"), "w") as f_out:
f_out.write(json.dumps(prune_result, indent=2, sort_keys=True))
f_out.write('\n')
if __name__ == "__main__":
main() | """Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist | identifier_body |
prune_head_with_taylor.py | "Pruning attention heads with Taylor expansion methods"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import torch.nn.functional as F
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.model_prune_head import BertForSequenceClassification, BertForPreTrainingLossMask
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from examples.classifier_processer import InputExample, InputFeatures, DataProcessor, MrpcProcessor, MnliProcessor, RteProcessor, ScitailProcessor, ColaProcessor, SstProcessor, QqpProcessor, QnliProcessor, WnliProcessor, StsProcessor
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"rte": RteProcessor,
"sst-2": SstProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"wnli": WnliProcessor,
"sts-b": StsProcessor,
"scitail": ScitailProcessor,
}
num_labels_task = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"rte": 2,
"sst-2": 2,
"qqp": 2,
"qnli": 2,
"wnli": 2,
"sts-b": 1,
"scitail": 2,
}
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist
def | (tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
# Load a fine-tuned model
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(
args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare the data
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)[0:args.num_examples]
model.eval()
if args.bert_model.find("base") != -1:
num_head, num_layer = 12, 12
elif args.bert_model.find("large") != -1:
num_head, num_layer = 16, 24
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
imp_head_count = [[0]*num_head for i in range(num_layer)]
prune_head_count = [[0]*num_head for i in range(num_layer)]
all_logits, all_label_ids = [], []
head_importance = torch.zeros(num_layer, num_head).to(device)
# evaluate the model
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
all_baseline_ids = torch.tensor(
[f.baseline_ids for f in eval_features], dtype=torch.long)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_baseline_ids, all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
index_count = 0
tot_tokens = 0
for baseline_ids, input_ids, input_mask, segment_ids, label_ids, in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
input_len = int(input_mask[0].sum())
att, loss = model(input_ids, "att", segment_ids, input_mask, label_ids, 0)
loss = loss.sum()
loss.backward()
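# The loop below estimates each head's importance with a first-order Taylor
# expansion: the per-head context activations saved in context_layer_val are
# dotted with their gradients, and the absolute per-token products are
# accumulated over tokens and examples into head_importance.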
for layer in range(num_layer):
self_att = model.bert.encoder.layer[layer].attention.self
ctx = self_att.context_layer_val
grad_ctx = ctx.grad
# Take the dot
dot = torch.einsum("bhli,bhli->bhl", [grad_ctx, ctx])
head_importance[layer] += dot.abs().sum(-1).sum(0).detach()
tot_tokens += input_len
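# All layers except the last are normalized by the accumulated token count
# (taken from the first example of each batch); the last layer is divided
# by the number of evaluated examples instead.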
head_importance[:-1] /= tot_tokens
head_importance[-1] /= args.num_examples
# normalize scores by layer
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
with open(os.path.join(args.output_dir, "head_importance_taylor.json"), "w") as f_out:
f_out.write(json.dumps(head_importance.tolist(), indent=2, sort_keys=True))
f_out.write('\n')
head_importance = head_importance.tolist()
final_importance = []
for i in range(num_layer):
final_importance += head_importance[i]
head_importance = final_importance
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)
prune_result = []
for prune_i in range(0, 11, 1):
prune_rate = prune_i / 10
important_index = np.argsort(np.array(head_importance))
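# Build the head mask: start from all ones, zero the prune_rate fraction of
# heads with the lowest Taylor importance, then broadcast the mask to
# [num_layer, num_head, seq_len, seq_len] so it can be passed as att_head_mask.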
importance_set = [[1.0]*num_head for i in range(0, num_layer)]
for i in range(0, min(int(prune_rate*num_layer*num_head),num_layer*num_head), 1):
importance_set[important_index[i]//num_head][important_index[i]%num_head] = 0.0
importance_set = torch.tensor(importance_set)
importance_set = importance_set.view(*importance_set.shape, 1, 1)
importance_set = importance_set.expand(-1, -1, args.max_seq_length, args.max_seq_length).to(device)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
seg_result_dict = {}
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation: %s *****", eval_segment)
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, "res", segment_ids, input_mask, label_ids, att_head_mask=importance_set)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
all_logits.append(logits)
all_label_ids.append(label_ids)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
# compute evaluation metric
all_logits = np.concatenate(all_logits, axis=0)
all_label_ids = np.concatenate(all_label_ids, axis=0)
metric_func = processor.get_metric_func()
eval_result = metric_func(all_logits, all_label_ids)
prune_result.append(eval_result)
result = {'prune_rate': prune_rate,
'eval_loss': eval_loss,
'eval_result': eval_result,
'task_name': args.task_name,
'eval_segment': eval_segment}
if eval_segment not in seg_result_dict:
seg_result_dict[eval_segment] = []
seg_result_dict[eval_segment].append(result)
# logging the results
logger.info(
"***** Eval results ({0}) *****".format(eval_segment))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
with open(os.path.join(args.output_dir, "prune_result_taylor.json"), "w") as f_out:
f_out.write(json.dumps(prune_result, indent=2, sort_keys=True))
f_out.write('\n')
if __name__ == "__main__":
main() | _truncate_seq_pair | identifier_name |
prune_head_with_taylor.py | "Pruning attention heads with Taylor expansion methods"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import torch.nn.functional as F
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.model_prune_head import BertForSequenceClassification, BertForPreTrainingLossMask
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from examples.classifier_processer import InputExample, InputFeatures, DataProcessor, MrpcProcessor, MnliProcessor, RteProcessor, ScitailProcessor, ColaProcessor, SstProcessor, QqpProcessor, QnliProcessor, WnliProcessor, StsProcessor
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
| "rte": RteProcessor,
"sst-2": SstProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"wnli": WnliProcessor,
"sts-b": StsProcessor,
"scitail": ScitailProcessor,
}
num_labels_task = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"rte": 2,
"sst-2": 2,
"qqp": 2,
"qnli": 2,
"wnli": 2,
"sts-b": 1,
"scitail": 2,
}
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
# Load a fine-tuned model
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(
args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare the data
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)[0:args.num_examples]
model.eval()
if args.bert_model.find("base") != -1:
num_head, num_layer = 12, 12
elif args.bert_model.find("large") != -1:
num_head, num_layer = 16, 24
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
imp_head_count = [[0]*num_head for i in range(num_layer)]
prune_head_count = [[0]*num_head for i in range(num_layer)]
all_logits, all_label_ids = [], []
head_importance = torch.zeros(num_layer, num_head).to(device)
# evaluate the model
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
all_baseline_ids = torch.tensor(
[f.baseline_ids for f in eval_features], dtype=torch.long)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_baseline_ids, all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.batch_size)
model.eval()
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
index_count = 0
tot_tokens = 0
for baseline_ids, input_ids, input_mask, segment_ids, label_ids, in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
input_len = int(input_mask[0].sum())
att, loss = model(input_ids, "att", segment_ids, input_mask, label_ids, 0)
loss = loss.sum()
loss.backward()
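# Accumulate per-head importance as the absolute dot product between each
# head's context-layer activations and their gradients, summed over tokens.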
for layer in range(num_layer):
self_att = model.bert.encoder.layer[layer].attention.self
ctx = self_att.context_layer_val
grad_ctx = ctx.grad
# Take the dot
dot = torch.einsum("bhli,bhli->bhl", [grad_ctx, ctx])
head_importance[layer] += dot.abs().sum(-1).sum(0).detach()
tot_tokens += input_len
head_importance[:-1] /= tot_tokens
head_importance[-1] /= args.num_examples
# normalize scores by layer
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
with open(os.path.join(args.output_dir, "head_importance_taylor.json"), "w") as f_out:
f_out.write(json.dumps(head_importance.tolist(), indent=2, sort_keys=True))
f_out.write('\n')
head_importance = head_importance.tolist()
final_importance = []
for i in range(num_layer):
final_importance += head_importance[i]
head_importance = final_importance
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)
prune_result = []
for prune_i in range(0, 11, 1):
prune_rate = prune_i / 10
important_index = np.argsort(np.array(head_importance))
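# Zero out the prune_rate fraction of least-important heads and expand the
# mask to the attention-score shape before re-running evaluation.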
importance_set = [[1.0]*num_head for i in range(0, num_layer)]
for i in range(0, min(int(prune_rate*num_layer*num_head),num_layer*num_head), 1):
importance_set[important_index[i]//num_head][important_index[i]%num_head] = 0.0
importance_set = torch.tensor(importance_set)
importance_set = importance_set.view(*importance_set.shape, 1, 1)
importance_set = importance_set.expand(-1, -1, args.max_seq_length, args.max_seq_length).to(device)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
seg_result_dict = {}
eval_features, tokenlist = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation: %s *****", eval_segment)
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=lbl_type)
eval_data = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
all_logits, all_label_ids = [], []
for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, "res", segment_ids, input_mask, label_ids, att_head_mask=importance_set)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
all_logits.append(logits)
all_label_ids.append(label_ids)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
# compute evaluation metric
all_logits = np.concatenate(all_logits, axis=0)
all_label_ids = np.concatenate(all_label_ids, axis=0)
metric_func = processor.get_metric_func()
eval_result = metric_func(all_logits, all_label_ids)
prune_result.append(eval_result)
result = {'prune_rate': prune_rate,
'eval_loss': eval_loss,
'eval_result': eval_result,
'task_name': args.task_name,
'eval_segment': eval_segment}
if eval_segment not in seg_result_dict:
seg_result_dict[eval_segment] = []
seg_result_dict[eval_segment].append(result)
# logging the results
logger.info(
"***** Eval results ({0}) *****".format(eval_segment))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
with open(os.path.join(args.output_dir, "prune_result_taylor.json"), "w") as f_out:
f_out.write(json.dumps(prune_result, indent=2, sort_keys=True))
f_out.write('\n')
if __name__ == "__main__":
main() | processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor, | random_line_split |
family.go | // Copyright (c) 2019-2021 Leonid Kneller. All rights reserved.
// Licensed under the MIT license.
// See the LICENSE file for full license information.
package randomnames
// family -- 1000 most frequent family names from the 2010 US Census.
var family [1000]string = [1000]string{
"Smith",
"Johnson",
"Williams",
"Brown",
"Jones",
"Garcia",
"Miller",
"Davis",
"Rodriguez",
"Martinez",
"Hernandez",
"Lopez",
"Gonzalez",
"Wilson",
"Anderson",
"Thomas",
"Taylor",
"Moore",
"Jackson",
"Martin",
"Lee",
"Perez",
"Thompson",
"White",
"Harris",
"Sanchez",
"Clark",
"Ramirez",
"Lewis",
"Robinson",
"Walker",
"Young",
"Allen",
"King",
"Wright",
"Scott",
"Torres",
"Nguyen",
"Hill",
"Flores",
"Green",
"Adams",
"Nelson",
"Baker",
"Hall",
"Rivera",
"Campbell",
"Mitchell",
"Carter",
"Roberts",
"Gomez",
"Phillips",
"Evans",
"Turner",
"Diaz",
"Parker",
"Cruz",
"Edwards",
"Collins",
"Reyes",
"Stewart",
"Morris",
"Morales",
"Murphy",
"Cook",
"Rogers",
"Gutierrez",
"Ortiz",
"Morgan",
"Cooper",
"Peterson",
"Bailey",
"Reed",
"Kelly",
"Howard",
"Ramos",
"Kim",
"Cox",
"Ward",
"Richardson",
"Watson",
"Brooks",
"Chavez",
"Wood",
"James",
"Bennett",
"Gray",
"Mendoza",
"Ruiz",
"Hughes",
"Price",
"Alvarez",
"Castillo",
"Sanders",
"Patel",
"Myers",
"Long",
"Ross",
"Foster",
"Jimenez",
"Powell",
"Jenkins",
"Perry",
"Russell",
"Sullivan",
"Bell",
"Coleman",
"Butler",
"Henderson",
"Barnes",
"Gonzales",
"Fisher",
"Vasquez",
"Simmons",
"Romero",
"Jordan",
"Patterson",
"Alexander",
"Hamilton",
"Graham",
"Reynolds",
"Griffin",
"Wallace",
"Moreno",
"West",
"Cole",
"Hayes",
"Bryant",
"Herrera",
"Gibson",
"Ellis",
"Tran",
"Medina",
"Aguilar",
"Stevens",
"Murray",
"Ford",
"Castro",
"Marshall",
"Owens",
"Harrison",
"Fernandez",
"Mcdonald",
"Woods",
"Washington",
"Kennedy",
"Wells",
"Vargas",
"Henry",
"Chen",
"Freeman",
"Webb",
"Tucker",
"Guzman",
"Burns",
"Crawford",
"Olson",
"Simpson",
"Porter",
"Hunter",
"Gordon",
"Mendez",
"Silva",
"Shaw",
"Snyder",
"Mason",
"Dixon",
"Munoz",
"Hunt",
"Hicks",
"Holmes",
"Palmer",
"Wagner",
"Black",
"Robertson",
"Boyd",
"Rose",
"Stone",
"Salazar",
"Fox",
"Warren",
"Mills",
"Meyer",
"Rice",
"Schmidt",
"Garza",
"Daniels",
"Ferguson",
"Nichols",
"Stephens",
"Soto",
"Weaver",
"Ryan",
"Gardner",
"Payne",
"Grant",
"Dunn",
"Kelley",
"Spencer",
"Hawkins",
"Arnold",
"Pierce",
"Vazquez",
"Hansen",
"Peters",
"Santos",
"Hart",
"Bradley",
"Knight",
"Elliott",
"Cunningham",
"Duncan",
"Armstrong",
"Hudson",
"Carroll",
"Lane",
"Riley",
"Andrews",
"Alvarado",
"Ray",
"Delgado",
"Berry",
"Perkins",
"Hoffman",
"Johnston",
"Matthews",
"Pena",
"Richards",
"Contreras",
"Willis",
"Carpenter",
"Lawrence",
"Sandoval",
"Guerrero",
"George",
"Chapman",
"Rios",
"Estrada",
"Ortega",
"Watkins",
"Greene",
"Nunez",
"Wheeler",
"Valdez",
"Harper",
"Burke",
"Larson",
"Santiago",
"Maldonado",
"Morrison",
"Franklin",
"Carlson",
"Austin",
"Dominguez",
"Carr",
"Lawson",
"Jacobs",
"Obrien",
"Lynch",
"Singh",
"Vega",
"Bishop",
"Montgomery",
"Oliver",
"Jensen",
"Harvey",
"Williamson",
"Gilbert",
"Dean",
"Sims",
"Espinoza",
"Howell",
"Li",
"Wong",
"Reid",
"Hanson",
"Le",
"Mccoy",
"Garrett",
"Burton",
"Fuller",
"Wang",
"Weber",
"Welch",
"Rojas",
"Lucas",
"Marquez",
"Fields",
"Park",
"Yang",
"Little",
"Banks",
"Padilla",
"Day",
"Walsh",
"Bowman",
"Schultz",
"Luna",
"Fowler",
"Mejia",
"Davidson",
"Acosta",
"Brewer",
"May",
"Holland",
"Juarez",
"Newman",
"Pearson",
"Curtis",
"Cortez",
"Douglas",
"Schneider",
"Joseph",
"Barrett",
"Navarro",
"Figueroa",
"Keller",
"Avila",
"Wade",
"Molina",
"Stanley",
"Hopkins",
"Campos",
"Barnett",
"Bates",
"Chambers",
"Caldwell",
"Beck",
"Lambert",
"Miranda",
"Byrd",
"Craig",
"Ayala",
"Lowe",
"Frazier",
"Powers",
"Neal",
"Leonard",
"Gregory",
"Carrillo",
"Sutton",
"Fleming",
"Rhodes",
"Shelton",
"Schwartz",
"Norris",
"Jennings",
"Watts",
"Duran",
"Walters",
"Cohen",
"Mcdaniel",
"Moran",
"Parks",
"Steele",
"Vaughn",
"Becker",
"Holt",
"Deleon",
"Barker",
"Terry",
"Hale",
"Leon",
"Hail",
"Benson",
"Haynes",
"Horton",
"Miles",
"Lyons",
"Pham",
"Graves",
"Bush",
"Thornton",
"Wolfe",
"Warner",
"Cabrera",
"Mckinney",
"Mann",
"Zimmerman",
"Dawson",
"Lara",
"Fletcher",
"Page",
"Mccarthy",
"Love",
"Robles",
"Cervantes",
"Solis",
"Erickson",
"Reeves",
"Chang",
"Klein",
"Salinas",
"Fuentes",
"Baldwin",
"Daniel",
"Simon",
"Velasquez",
"Hardy",
"Higgins",
"Aguirre",
"Lin",
"Cummings",
"Chandler",
"Sharp",
"Barber",
"Bowen",
"Ochoa",
"Dennis",
"Robbins",
"Liu",
"Ramsey",
"Francis",
"Griffith",
"Paul",
"Blair",
"Oconnor",
"Cardenas",
"Pacheco",
"Cross",
"Calderon",
"Quinn",
"Moss",
"Swanson",
"Chan",
"Rivas",
"Khan",
"Rodgers",
"Serrano",
"Fitzgerald",
"Rosales",
"Stevenson",
"Christensen",
"Manning",
"Gill",
"Curry",
"Mclaughlin",
"Harmon",
"Mcgee",
"Gross",
"Doyle",
"Garner",
"Newton",
"Burgess",
"Reese",
"Walton",
"Blake",
"Trujillo",
"Adkins",
"Brady",
"Goodman",
"Roman",
"Webster",
"Goodwin",
"Fischer",
"Huang",
"Potter",
"Delacruz",
"Montoya", | "Wu",
"Hines",
"Mullins",
"Castaneda",
"Malone",
"Cannon",
"Tate",
"Mack",
"Sherman",
"Hubbard",
"Hodges",
"Zhang",
"Guerra",
"Wolf",
"Valencia",
"Saunders",
"Franco",
"Rowe",
"Gallagher",
"Farmer",
"Hammond",
"Hampton",
"Townsend",
"Ingram",
"Wise",
"Gallegos",
"Clarke",
"Barton",
"Schroeder",
"Maxwell",
"Waters",
"Logan",
"Camacho",
"Strickland",
"Norman",
"Person",
"Colon",
"Parsons",
"Frank",
"Harrington",
"Glover",
"Osborne",
"Buchanan",
"Casey",
"Floyd",
"Patton",
"Ibarra",
"Ball",
"Tyler",
"Suarez",
"Bowers",
"Orozco",
"Salas",
"Cobb",
"Gibbs",
"Andrade",
"Bauer",
"Conner",
"Moody",
"Escobar",
"Mcguire",
"Lloyd",
"Mueller",
"Hartman",
"French",
"Kramer",
"Mcbride",
"Pope",
"Lindsey",
"Velazquez",
"Norton",
"Mccormick",
"Sparks",
"Flynn",
"Yates",
"Hogan",
"Marsh",
"Macias",
"Villanueva",
"Zamora",
"Pratt",
"Stokes",
"Owen",
"Ballard",
"Lang",
"Brock",
"Villarreal",
"Charles",
"Drake",
"Barrera",
"Cain",
"Patrick",
"Pineda",
"Burnett",
"Mercado",
"Santana",
"Shepherd",
"Bautista",
"Ali",
"Shaffer",
"Lamb",
"Trevino",
"Mckenzie",
"Hess",
"Beil",
"Olsen",
"Cochran",
"Morton",
"Nash",
"Wilkins",
"Petersen",
"Briggs",
"Shah",
"Roth",
"Nicholson",
"Holloway",
"Lozano",
"Rangel",
"Flowers",
"Hoover",
"Short",
"Arias",
"Mora",
"Valenzuela",
"Bryan",
"Meyers",
"Weiss",
"Underwood",
"Bass",
"Greer",
"Summers",
"Houston",
"Carson",
"Morrow",
"Clayton",
"Whitaker",
"Decker",
"Yoder",
"Collier",
"Zuniga",
"Carey",
"Wilcox",
"Melendez",
"Poole",
"Roberson",
"Larsen",
"Conley",
"Davenport",
"Copeland",
"Massey",
"Lam",
"Huff",
"Rocha",
"Cameron",
"Jefferson",
"Hood",
"Monroe",
"Anthony",
"Pittman",
"Huynh",
"Randall",
"Singleton",
"Kirk",
"Combs",
"Mathis",
"Christian",
"Skinner",
"Bradford",
"Richard",
"Galvan",
"Wall",
"Boone",
"Kirby",
"Wilkinson",
"Bridges",
"Bruce",
"Atkinson",
"Velez",
"Meza",
"Roy",
"Vincent",
"York",
"Hodge",
"Villa",
"Abbott",
"Allison",
"Tapia",
"Gates",
"Chase",
"Sosa",
"Sweeney",
"Farrell",
"Wyatt",
"Dalton",
"Horn",
"Barron",
"Phelps",
"Yu",
"Dickerson",
"Heath",
"Foley",
"Atkins",
"Mathews",
"Bonilla",
"Acevedo",
"Benitez",
"Zavala",
"Hensley",
"Glenn",
"Cisneros",
"Harrell",
"Shields",
"Rubio",
"Huffman",
"Choi",
"Boyer",
"Garrison",
"Arroyo",
"Bond",
"Kane",
"Hancock",
"Callahan",
"Dillon",
"Cline",
"Wiggins",
"Grimes",
"Arellano",
"Melton",
"Oneill",
"Savage",
"Ho",
"Beltran",
"Pitts",
"Parrish",
"Ponce",
"Rich",
"Booth",
"Koch",
"Golden",
"Ware",
"Brennan",
"Mcdowell",
"Marks",
"Cantu",
"Humphrey",
"Baxter",
"Sawyer",
"Clay",
"Tanner",
"Hutchinson",
"Kaur",
"Berg",
"Wiley",
"Gilmore",
"Russo",
"Villegas",
"Hobbs",
"Keith",
"Wilkerson",
"Ahmed",
"Beard",
"Mcclain",
"Montes",
"Mata",
"Rosario",
"Vang",
"Walter",
"Henson",
"Oneal",
"Mosley",
"Mcclure",
"Beasley",
"Stephenson",
"Snow",
"Huerta",
"Preston",
"Vance",
"Barry",
"Johns",
"Eaton",
"Blackwell",
"Dyer",
"Prince",
"Macdonald",
"Solomon",
"Guevara",
"Stafford",
"English",
"Hurst",
"Woodard",
"Cortes",
"Shannon",
"Kemp",
"Nolan",
"Mccullough",
"Merritt",
"Murillo",
"Moon",
"Salgado",
"Strong",
"Kline",
"Cordova",
"Barajas",
"Roach",
"Rosas",
"Winters",
"Jacobson",
"Lester",
"Knox",
"Bullock",
"Kerr",
"Leach",
"Meadows",
"Orr",
"Davila",
"Whitehead",
"Pruitt",
"Kent",
"Conway",
"Mckee",
"Barr",
"David",
"Dejesus",
"Marin",
"Berger",
"Mcintyre",
"Blankenship",
"Gaines",
"Palacios",
"Cuevas",
"Bartlett",
"Durham",
"Dorsey",
"Mccall",
"Odonnell",
"Stein",
"Browning",
"Stout",
"Lowery",
"Sloan",
"Mclean",
"Hendricks",
"Calhoun",
"Sexton",
"Chung",
"Gentry",
"Hull",
"Duarte",
"Ellison",
"Nielsen",
"Gillespie",
"Buck",
"Middleton",
"Sellers",
"Leblanc",
"Esparza",
"Hardin",
"Bradshaw",
"Mcintosh",
"Howe",
"Livingston",
"Frost",
"Glass",
"Morse",
"Knapp",
"Herman",
"Stark",
"Bravo",
"Noble",
"Spears",
"Weeks",
"Corona",
"Frederick",
"Buckley",
"Mcfarland",
"Hebert",
"Enriquez",
"Hickman",
"Quintero",
"Randolph",
"Schaefer",
"Walls",
"Trejo",
"House",
"Reilly",
"Pennington",
"Michael",
"Conrad",
"Giles",
"Benjamin",
"Crosby",
"Fitzpatrick",
"Donovan",
"Mays",
"Mahoney",
"Valentine",
"Raymond",
"Medrano",
"Hahn",
"Mcmillan",
"Small",
"Bentley",
"Felix",
"Peck",
"Lucero",
"Boyle",
"Hanna",
"Pace",
"Rush",
"Hurley",
"Harding",
"Mcconnell",
"Bernal",
"Nava",
"Ayers",
"Everett",
"Ventura",
"Avery",
"Pugh",
"Mayer",
"Bender",
"Shepard",
"Mcmahon",
"Landry",
"Case",
"Sampson",
"Moses",
"Magana",
"Blackburn",
"Dunlap",
"Gould",
"Duffy",
"Vaughan",
"Herring",
"Mckay",
"Espinosa",
"Rivers",
"Farley",
"Bernard",
"Ashley",
"Friedman",
"Potts",
"Truong",
"Costa",
"Correa",
"Blevins",
"Nixon",
"Clements",
"Fry",
"Delarosa",
"Best",
"Benton",
"Lugo",
"Portillo",
"Dougherty",
"Crane",
"Haley",
"Phan",
"Villalobos",
"Blanchard",
"Horne",
"Finley",
"Quintana",
"Lynn",
"Esquivel",
"Bean",
"Dodson",
"Mullen",
"Xiong",
"Hayden",
"Cano",
"Levy",
"Huber",
"Richmond",
"Moyer",
"Lim",
"Frye",
"Sheppard",
"Mccarty",
"Avalos",
"Booker",
"Waller",
"Parra",
"Woodward",
"Jaramillo",
"Krueger",
"Rasmussen",
"Brandt",
"Peralta",
"Donaldson",
"Stuart",
"Faulkner",
"Maynard",
"Galindo",
"Coffey",
"Estes",
"Sanford",
"Burch",
"Maddox",
"Vo",
"Oconnell",
"Vu",
"Andersen",
"Spence",
"Mcpherson",
"Church",
"Schmitt",
"Stanton",
"Leal",
"Cherry",
"Compton",
"Dudley",
"Sierra",
"Pollard",
"Alfaro",
"Hester",
"Proctor",
"Lu",
"Hinton",
"Novak",
"Good",
"Madden",
"Mccann",
"Terrell",
"Jarvis",
"Dickson",
"Reyna",
"Cantrell",
"Mayo",
"Branch",
"Hendrix",
"Rollins",
"Rowland",
"Whitney",
"Duke",
"Odom",
"Daugherty",
"Travis",
"Tang",
"Archer",
} | "Todd", | random_line_split |