Dataset schema:

| column | type | stats |
|---|---|---|
| file_name | large_string | lengths 4 – 140 |
| prefix | large_string | lengths 0 – 12.1k |
| suffix | large_string | lengths 0 – 12k |
| middle | large_string | lengths 0 – 7.51k |
| fim_type | large_string | 4 classes |

Each row is a fill-in-the-middle (FIM) sample: a source file is split into a `prefix`, a masked `middle`, and a `suffix`, and `fim_type` records what kind of span was masked (`conditional_block`, `identifier_body`, `random_line_split`, or `identifier_name`). The source files the samples are drawn from follow.
**server.go**

```go
	// ... Stop() to Start() that
	// Stop() had already been called, so no need to do anything further
	// in Start().
	// Note that when used as a flag, nil value means that Stop() has
	// been called.
	stopped chan struct{}
	daemon  bool
}

type Connection struct {
	Conn          *grpc.ClientConn
	AgentClient   idl.AgentClient
	Hostname      string
	CancelContext func()
}

func New(conf *Config, grpcDialer Dialer, stateDir string) *Server {
	h := &Server{
		Config:     conf,
		StateDir:   stateDir,
		stopped:    make(chan struct{}, 1),
		grpcDialer: grpcDialer,
	}
	return h
}

// MakeDaemon tells the Server to disconnect its stdout/stderr streams after
// successfully starting up.
func (s *Server) MakeDaemon() {
	s.daemon = true
}

func (s *Server) Start() error {
	lis, err := net.Listen("tcp", ":"+strconv.Itoa(s.Port))
	if err != nil {
		return xerrors.Errorf("listen on port %d: %w", s.Port, err)
	}

	// Set up an interceptor function to log any panics we get from request
	// handlers.
	interceptor := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
		defer log.WritePanics()
		return handler(ctx, req)
	}
	server := grpc.NewServer(grpc.UnaryInterceptor(interceptor))

	s.mu.Lock()
	if s.stopped == nil {
		// Stop() has already been called; return without serving.
		s.mu.Unlock()
		return ErrHubStopped
	}
	s.server = server
	s.lis = lis
	s.mu.Unlock()

	idl.RegisterCliToHubServer(server, s)
	reflection.Register(server)

	if s.daemon {
		fmt.Printf("Hub started on port %d (pid %d)\n", s.Port, os.Getpid())
		daemon.Daemonize()
	}

	err = server.Serve(lis)
	if err != nil {
		err = xerrors.Errorf("serve: %w", err)
	}

	// inform Stop() that it is OK to stop now
	s.stopped <- struct{}{}

	return err
}

func (s *Server) StopServices(ctx context.Context, in *idl.StopServicesRequest) (*idl.StopServicesReply, error) {
	err := s.StopAgents()
	if err != nil {
		gplog.Debug("failed to stop agents: %#v", err)
	}

	s.Stop(false)
	return &idl.StopServicesReply{}, nil
}

// TODO: add unit tests for this; this is currently tricky due to h.AgentConns()
// mutating global state
func (s *Server) StopAgents() error {
	// FIXME: s.AgentConns() fails fast if a single agent isn't available
	// we need to connect to all available agents so we can stop just those
	_, err := s.AgentConns()
	if err != nil {
		return err
	}

	var wg sync.WaitGroup
	errs := make(chan error, len(s.agentConns))

	for _, conn := range s.agentConns {
		conn := conn

		wg.Add(1)
		go func() {
			defer wg.Done()

			_, err := conn.AgentClient.StopAgent(context.Background(), &idl.StopAgentRequest{})
			if err == nil { // no error means the agent did not terminate as expected
				errs <- xerrors.Errorf("failed to stop agent on host: %s", conn.Hostname)
				return
			}

			// XXX: "transport is closing" is not documented but is needed to uniquely interpret codes.Unavailable
			// https://github.com/grpc/grpc/blob/v1.24.0/doc/statuscodes.md
			errStatus := grpcStatus.Convert(err)
			if errStatus.Code() != codes.Unavailable || errStatus.Message() != "transport is closing" {
				errs <- xerrors.Errorf("failed to stop agent on host %s : %w", conn.Hostname, err)
			}
		}()
	}

	wg.Wait()
	close(errs)

	var multiErr *multierror.Error
	for err := range errs {
		multiErr = multierror.Append(multiErr, err)
	}

	return multiErr.ErrorOrNil()
}

func (s *Server) Stop(closeAgentConns bool) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// StopServices calls Stop(false) because it has already closed the agentConns
	if closeAgentConns {
		s.closeAgentConns()
	}

	if s.server != nil {
		s.server.Stop()
		<-s.stopped // block until it is OK to stop
	}

	// Mark this server stopped so that a concurrent Start() doesn't try to
	// start things up again.
	s.stopped = nil
}

func (s *Server) RestartAgents(ctx context.Context, in *idl.RestartAgentsRequest) (*idl.RestartAgentsReply, error) {
	restartedHosts, err := RestartAgents(ctx, nil, AgentHosts(s.Source), s.AgentPort, s.StateDir)
	return &idl.RestartAgentsReply{AgentHosts: restartedHosts}, err
}

func RestartAgents(ctx context.Context,
	dialer func(context.Context, string) (net.Conn, error),
	hostnames []string,
	port int,
	stateDir string) ([]string, error) {

	var wg sync.WaitGroup
	restartedHosts := make(chan string, len(hostnames))
	errs := make(chan error, len(hostnames))

	for _, host := range hostnames {
		wg.Add(1)
		go func(host string) {
			defer wg.Done()

			address := host + ":" + strconv.Itoa(port)
			timeoutCtx, cancelFunc := context.WithTimeout(ctx, 3*time.Second)
			opts := []grpc.DialOption{
				grpc.WithBlock(),
				grpc.WithInsecure(),
				grpc.FailOnNonTempDialError(true),
			}
			if dialer != nil {
				opts = append(opts, grpc.WithContextDialer(dialer))
			}
			conn, err := grpc.DialContext(timeoutCtx, address, opts...)
			cancelFunc()
			if err == nil {
				err = conn.Close()
				if err != nil {
					gplog.Error("failed to close agent connection to %s: %+v", host, err)
				}
				return
			}

			gplog.Debug("failed to dial agent on %s: %+v", host, err)
			gplog.Info("starting agent on %s", host)

			agentPath, err := getAgentPath()
			if err != nil {
				errs <- err
				return
			}
			cmd := execCommand("ssh", host,
				fmt.Sprintf("bash -c \"%s agent --daemonize --port %d --state-directory %s\"", agentPath, port, stateDir))
			stdout, err := cmd.Output()
			if err != nil {
				errs <- err
				return
			}

			gplog.Debug(string(stdout))
			restartedHosts <- host
		}(host)
	}

	wg.Wait()
	close(errs)
	close(restartedHosts)

	var hosts []string
	for h := range restartedHosts {
		hosts = append(hosts, h)
	}

	var multiErr *multierror.Error
	for err := range errs {
		multiErr = multierror.Append(multiErr, err)
	}

	return hosts, multiErr.ErrorOrNil()
}

func (s *Server) AgentConns() ([]*Connection, error) {
	// Lock the mutex to protect against races with Server.Stop().
	// XXX This is a *ridiculously* broad lock. Have fun waiting for the dial
	// timeout when calling Stop() and AgentConns() at the same time, for
	// instance. We should not lock around a network operation, but it seems
	// like the AgentConns concept is not long for this world anyway.
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.agentConns != nil {
		err := EnsureConnsAreReady(s.agentConns)
		if err != nil {
			gplog.Error("ensureConnsAreReady failed: %s", err)
			return nil, err
		}

		return s.agentConns, nil
	}

	hostnames := AgentHosts(s.Source)
	for _, host := range hostnames {
		ctx, cancelFunc := context.WithTimeout(context.Background(), DialTimeout)
		conn, err := s.grpcDialer(ctx,
			host+":"+strconv.Itoa(s.AgentPort),
			grpc.WithInsecure(), grpc.WithBlock())
		if err != nil {
			err = xerrors.Errorf("grpcDialer failed: %w", err)
			gplog.Error(err.Error())
			cancelFunc()
			return nil, err
		}
		s.agentConns = append(s.agentConns, &Connection{
			Conn:          conn,
			AgentClient:   idl.NewAgentClient(conn),
			Hostname:      host,
			CancelContext: cancelFunc,
		})
	}

	return s.agentConns, nil
}

func EnsureConnsAreReady(agentConns []*Connection) error {
	hostnames := []string{}
	for _, conn := range agentConns {
		if conn.Conn.GetState() != connectivity.Ready {
			hostnames = append(hostnames, conn.Hostname)
		}
	}

	if len(hostnames) > 0 {
		return fmt.Errorf("the connections to the following hosts were not ready: %s", strings.Join(hostnames, ","))
	}

	return nil
}

// Closes all h.agentConns. Callers must hold the Server's mutex.
// TODO: this function assumes that all h.agentConns are _not_ in a terminal
// state (e.g. already closed). If so, conn.Conn.WaitForStateChange() can block
// indefinitely.
func (s *Server) closeAgentConns() {
	for _, conn := range s.agentConns {
		defer conn.CancelContext()
		currState := conn.Conn.GetState()
		err := conn.Conn.Close()
		if err != nil {
			gplog.Info(fmt.Sprintf("Error closing hub to agent connection. host: %s, err: %s", conn.Hostname, err.Error()))
		}
		conn.Conn.WaitForStateChange(context.Background(), currState)
	}
}

type InitializeConfig struct {
	Standby   greenplum.SegConfig
	Master    greenplum.SegConfig
	Primaries []greenplum.SegConfig
	Mirrors   []greenplum.SegConfig
}

// Config contains all the information that will be persisted to/loaded from
// disk during calls to Save() and Load().
type Config struct {
	Source *greenplum.Cluster
	Target *greenplum.Cluster

	// TargetInitializeConfig contains all the info needed to initialize the
	// target cluster's master, standby, primaries and mirrors.
	TargetInitializeConfig InitializeConfig

	Port        int
	AgentPort   int
	UseLinkMode bool
	UpgradeID   upgrade.ID

	// Tablespaces contains the tablespace in the database keyed by
	// dbid and tablespace oid
	Tablespaces                greenplum.Tablespaces
	TablespacesMappingFilePath string
}

func (c *Config) Load(r io.Reader) error {
	dec := json.NewDecoder(r)
	return dec.Decode(c)
}

func (c *Config) Save(w io.Writer) error {
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	return enc.Encode(c)
}

// SaveConfig persists the hub's configuration to disk.
func (s *Server) SaveConfig() (err error) {
	// TODO: Switch to an atomic implementation like renameio. Consider what
	// happens if Config.Save() panics: we'll have truncated the file
	// on disk and the hub will be unable to recover. For now, since we normally
	// only save the configuration during initialize and any configuration
	// errors could be fixed by reinitializing, the risk seems small.
	file, err := utils.System.Create(upgrade.GetConfigFile())
	if err != nil {
		return err
	}
	defer func() {
		if cerr := file.Close(); cerr != nil {
			cerr = xerrors.Errorf("closing hub configuration: %w", cerr)
			err = multierror.Append(err, cerr).ErrorOrNil()
		}
	}()

	err = s.Config.Save(file)
	if err != nil {
		return xerrors.Errorf("saving hub configuration: %w", err)
	}

	return nil
}
```
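The `stopped` channel above does double duty: `Start()` sends on it after `Serve` returns so that a concurrent `Stop()` can block until shutdown is actually safe, and `Stop()` nils it out so a later `Start()` refuses to run. A minimal sketch of the same handshake with the gRPC details stripped away (the type and function names here are illustrative, not part of the hub API):

```go
package lifecycle

import (
	"errors"
	"sync"
)

var errStopped = errors.New("server already stopped")

type lifecycle struct {
	mu      sync.Mutex
	running bool          // set once start() is committed to serving
	stopped chan struct{} // nil once stop() has run
}

func newLifecycle() *lifecycle {
	// Buffered so start() can finish even when no stop() is waiting.
	return &lifecycle{stopped: make(chan struct{}, 1)}
}

func (l *lifecycle) start(serve func()) error {
	l.mu.Lock()
	if l.stopped == nil {
		l.mu.Unlock()
		return errStopped // stop() won the race; refuse to serve.
	}
	l.running = true
	l.mu.Unlock()

	serve() // blocks until halt() closes the listener

	// Tell a waiting stop() that shutdown may proceed.
	l.stopped <- struct{}{}
	return nil
}

func (l *lifecycle) stop(halt func()) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.running {
		halt()      // e.g. grpc.Server.Stop(); unblocks serve()
		<-l.stopped // block until it is OK to stop
		l.running = false
	}
	l.stopped = nil // when nil, start() knows stop() already ran
}
```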
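`RestartAgents` takes an optional dialer precisely so that tests can avoid real TCP. A sketch of how a test might exercise it in-memory with gRPC's `bufconn` package — this assumes it lives in the same package so `RestartAgents` is in scope, a fake agent service is registered elsewhere, and the host name, port, and state directory are placeholders:

```go
package hub

import (
	"context"
	"net"
	"testing"

	"google.golang.org/grpc"
	"google.golang.org/grpc/test/bufconn"
)

func TestRestartAgentsDialsInMemory(t *testing.T) {
	lis := bufconn.Listen(1024 * 1024)
	defer lis.Close()

	srv := grpc.NewServer()
	// A real test would register a fake agent service here.
	go srv.Serve(lis)
	defer srv.Stop()

	// Matches the dialer signature RestartAgents expects; the address
	// argument is ignored because every dial lands on the bufconn.
	dialer := func(ctx context.Context, _ string) (net.Conn, error) {
		return lis.DialContext(ctx)
	}

	hosts, err := RestartAgents(context.Background(), dialer,
		[]string{"sdw1"}, 6416, "/tmp/state")
	if err != nil {
		t.Fatalf("RestartAgents: %v", err)
	}
	// A successful dial means the agent was already up, so nothing
	// should have been restarted over ssh.
	if len(hosts) != 0 {
		t.Errorf("expected no restarts when agents are reachable, got %v", hosts)
	}
}
```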
**console_test.go**

```go
	for name, conf := range configs(t, false /* noOverlay */) {
		t.Run(name, func(t *testing.T) {
			spec := testutil.NewSpecWithArgs("true")
			spec.Process.Terminal = true
			_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			sock, err := socketPath(bundleDir)
			if err != nil {
				t.Fatalf("error getting socket path: %v", err)
			}
			srv, cleanup := createConsoleSocket(t, sock)
			defer cleanup()

			// Create the container and pass the socket name.
			args := Args{
				ID:            testutil.RandomContainerID(),
				Spec:          spec,
				BundleDir:     bundleDir,
				ConsoleSocket: sock,
			}
			c, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer c.Destroy()

			// Make sure we get a console PTY.
			ptyMaster, err := receiveConsolePTY(srv)
			if err != nil {
				t.Fatalf("error receiving console FD: %v", err)
			}
			ptyMaster.Close()
		})
	}
}

// Test that a pty FD is sent over the console socket if one is provided.
func TestMultiContainerConsoleSocket(t *testing.T) {
	for name, conf := range configs(t, false /* noOverlay */) {
		t.Run(name, func(t *testing.T) {
			rootDir, cleanup, err := testutil.SetupRootDir()
			if err != nil {
				t.Fatalf("error creating root dir: %v", err)
			}
			defer cleanup()
			conf.RootDir = rootDir

			// Setup the containers.
			sleep := []string{"sleep", "100"}
			tru := []string{"true"}
			testSpecs, ids := createSpecs(sleep, tru)
			testSpecs[1].Process.Terminal = true

			bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			args := Args{
				ID:        ids[0],
				Spec:      testSpecs[0],
				BundleDir: bundleDir,
			}
			rootCont, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer rootCont.Destroy()
			if err := rootCont.Start(conf); err != nil {
				t.Fatalf("error starting container: %v", err)
			}

			bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[0])
			if err != nil {
				t.Fatalf("error setting up container: %v", err)
			}
			defer cleanup()

			sock, err := socketPath(bundleDir)
			if err != nil {
				t.Fatalf("error getting socket path: %v", err)
			}
			srv, cleanup := createConsoleSocket(t, sock)
			defer cleanup()

			// Create the container and pass the socket name.
			args = Args{
				ID:            ids[1],
				Spec:          testSpecs[1],
				BundleDir:     bundleDir,
				ConsoleSocket: sock,
			}
			cont, err := New(conf, args)
			if err != nil {
				t.Fatalf("error creating container: %v", err)
			}
			defer cont.Destroy()
			if err := cont.Start(conf); err != nil {
				t.Fatalf("error starting container: %v", err)
			}

			// Make sure we get a console PTY.
			ptyMaster, err := receiveConsolePTY(srv)
			if err != nil {
				t.Fatalf("error receiving console FD: %v", err)
			}
			ptyMaster.Close()
		})
	}
}

// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
	spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
	conf := testutil.TestConfig(t)

	_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
	if err != nil {
		t.Fatalf("error setting up container: %v", err)
	}
	defer cleanup()

	// Create and start the container.
	args := Args{
		ID:        testutil.RandomContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
	}
	c, err := New(conf, args)
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer c.Destroy()
	if err := c.Start(conf); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	// Create a pty master/replica. The replica will be passed to the exec
	// process.
	ptyMaster, ptyReplica, err := pty.Open()
	if err != nil {
		t.Fatalf("error opening pty: %v", err)
	}
	defer ptyMaster.Close()
	defer ptyReplica.Close()

	// Exec bash and attach a terminal. Note that occasionally /bin/sh
	// may be a different shell or have a different configuration (such
	// as disabling interactive mode and job control). Since we want to
	// explicitly test interactive mode, use /bin/bash. See b/116981926.
	execArgs := &control.ExecArgs{
		Filename: "/bin/bash",
		// Don't let bash execute from profile or rc files, otherwise
		// our PID counts get messed up.
		Argv: []string{"/bin/bash", "--noprofile", "--norc"},
		// Pass the pty replica as FD 0, 1, and 2.
		FilePayload: control.NewFilePayload(map[int]*os.File{
			0: ptyReplica, 1: ptyReplica, 2: ptyReplica,
		}, nil),
		StdioIsPty: true,
	}
	pid, err := c.Execute(conf, execArgs)
	if err != nil {
		t.Fatalf("error executing: %v", err)
	}
	if pid != 2 {
		t.Fatalf("exec got pid %d, wanted %d", pid, 2)
	}

	// Make sure all the processes are running.
	expectedPL := []*control.Process{
		// Root container process.
		newProcessBuilder().Cmd("sleep").Process(),
		// Bash from exec process.
		newProcessBuilder().PID(2).Cmd("bash").Process(),
	}
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Execute sleep.
	if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
		t.Fatalf("ptyMaster.Write: %v", err)
	}

	// Wait for it to start. Sleep's PPID is bash's PID.
	expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Send a SIGTERM to the foreground process for the exec PID. Note that
	// although we pass in the PID of "bash", it should actually terminate
	// "sleep", since that is the foreground process.
	if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}

	// Sleep process should be gone.
	expectedPL = expectedPL[:len(expectedPL)-1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Sleep is dead, but it may take more time for bash to notice and
	// change the foreground process back to itself. We know it is done
	// when bash writes "Terminated" to the pty.
	if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
		t.Fatalf("bash did not take over pty: %v", err)
	}

	// Send a SIGKILL to the foreground process again. This time "bash"
	// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
	// because bash ignores those.
	if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
		t.Fatalf("error signaling container: %v", err)
	}
	expectedPL = expectedPL[:1]
	if err := waitForProcessList(c, expectedPL); err != nil {
		t.Error(err)
	}

	// Make sure the process indicates it was killed by a SIGKILL.
	ws, err := c.WaitPID(pid)
	if err != nil {
		t.Errorf("waiting on container failed: %v", err)
	}
	if !ws.Signaled() {
		t.Error("ws.Signaled() got false, want true")
	}
	if got, want := ws.Signal(), unix.SIGKILL; got != want {
		t.Errorf("ws.Signal() got %v, want %v", got, want)
	}
}
```
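The console-socket protocol these tests rely on is plain `SCM_RIGHTS` file-descriptor passing: the runtime connects to the socket path given by `--console-socket` and sends the pty master FD in the control data of a single message. A minimal receiver sketch of roughly what a helper like `receiveConsolePTY` has to do — the package name and function shape are illustrative assumptions, not the test's actual helper:

```go
package consoletest

import (
	"fmt"
	"net"
	"os"

	"golang.org/x/sys/unix"
)

// recvPTY accepts one connection on a listening UNIX socket and pulls
// a pty master FD out of the SCM_RIGHTS control message.
func recvPTY(l *net.UnixListener) (*os.File, error) {
	conn, err := l.AcceptUnix()
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	buf := make([]byte, 1)
	oob := make([]byte, unix.CmsgSpace(4)) // room for one int32 fd
	_, oobn, _, _, err := conn.ReadMsgUnix(buf, oob)
	if err != nil {
		return nil, err
	}
	msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil || len(msgs) != 1 {
		return nil, fmt.Errorf("expected one control message, got %d (%v)", len(msgs), err)
	}
	fds, err := unix.ParseUnixRights(&msgs[0])
	if err != nil || len(fds) != 1 {
		return nil, fmt.Errorf("expected exactly one fd, got %v (%v)", fds, err)
	}
	return os.NewFile(uintptr(fds[0]), "console-pty-master"), nil
}
```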
**build.py**

```python
        env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
        tar_name = get_one(os.listdir(dest_dir))
        for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
            os.rename(os.path.join(dest_dir, tar_name, leafname),
                      os.path.join(dest_dir, leafname))
        os.rmdir(os.path.join(dest_dir, tar_name))


# This handles gcc, where two source tarballs must be unpacked on top
# of each other.
class MultiTarballTree(DirTree):

    def __init__(self, tar_paths):
        self._tar_paths = tar_paths

    def write_tree(self, env, dest_dir):
        assert os.listdir(dest_dir) == []
        for tar_file in self._tar_paths:
            env.cmd(["tar", "-C", dest_dir, "-xf", tar_file])
        tar_name = get_one(os.listdir(dest_dir))
        for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
            os.rename(os.path.join(dest_dir, tar_name, leafname),
                      os.path.join(dest_dir, leafname))
        os.rmdir(os.path.join(dest_dir, tar_name))


class PatchedTree(DirTree):

    def __init__(self, orig_tree, patch_file):
        self._orig_tree = orig_tree
        self._patch_file = patch_file

    def write_tree(self, env, dest_dir):
        self._orig_tree.write_tree(env, dest_dir)
        env.cmd(["patch", "-d", dest_dir, "-p1", "-i", self._patch_file])


class EnvVarEnv(object):

    def __init__(self, envvars, env):
        self._envvars = envvars
        self._env = env

    def cmd(self, args, **kwargs):
        return self._env.cmd(
            ["env"] + ["%s=%s" % (key, value) for key, value in self._envvars]
            + args, **kwargs)


class ModuleBase(object):

    def __init__(self, source_dir, build_dir, prefix, install_dir, env_vars):
        self._env = cmd_env.VerboseWrapper(cmd_env.BasicEnv())
        self._source_dir = source_dir
        self._build_dir = build_dir
        self._prefix = prefix
        self._install_dir = install_dir
        self._build_env = cmd_env.PrefixCmdEnv(
            cmd_env.in_dir(self._build_dir), EnvVarEnv(env_vars, self._env))
        self._args = {"prefix": self._prefix,
                      "source_dir": self._source_dir}

    def all(self):
        return action_tree.make_node(
            [self.unpack, self.configure, self.make, self.install], self.name)

    def unpack(self, log):
        if not os.path.exists(self._source_dir):
            temp_dir = "%s.temp" % self._source_dir
            os.makedirs(temp_dir)
            self.source.write_tree(self._env, temp_dir)
            os.rename(temp_dir, self._source_dir)


def remove_tree(dir_path):
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)


def copy_onto(source_dir, dest_dir):
    for leafname in os.listdir(source_dir):
        subprocess.check_call(["cp", "-a", os.path.join(source_dir, leafname),
                               "-t", dest_dir])


def install_destdir(prefix_dir, install_dir, func):
    temp_dir = "%s.tmp" % install_dir
    remove_tree(temp_dir)
    func(temp_dir)
    remove_tree(install_dir)
    # Tree is installed into $DESTDIR/$prefix.
    # We need to strip $prefix.
    assert prefix_dir.startswith("/")
    os.rename(os.path.join(temp_dir, prefix_dir.lstrip("/")), install_dir)
    # TODO: assert that temp_dir doesn't contain anything except prefix dirs
    remove_tree(temp_dir)
    mkdir_p(prefix_dir)
    copy_onto(install_dir, prefix_dir)


binutils_tree = PatchedTree(TarballTree(find_file("binutils-2.20.tar.bz2")),
                            find_file("binutils-2.20.patch"))
# TODO: Need to glob for multiple patch files
gcc_tree = PatchedTree(MultiTarballTree(
        [find_file("gcc-core-4.2.2.tar.bz2"),
         find_file("gcc-g++-4.2.2.tar.bz2")]),
    find_file("000-gcc-4.2.2.patch"))
newlib_tree = PatchedTree(TarballTree(find_file("newlib-1.17.0.tar.gz")),
                          find_file("newlib-1.17.0.patch"))


def Module(name, source, configure_cmd, make_cmd, install_cmd):
    # TODO: this nested class is ugly
    class Mod(ModuleBase):

        # These assignments don't work because of Python's odd scoping rules:
        # name = name
        # source = source

        def _subst(self, cmd):
            return [arg % self._args for arg in cmd]

        def configure(self, log):
            mkdir_p(self._build_dir)
            self._build_env.cmd(self._subst(configure_cmd))

        def make(self, log):
            self._build_env.cmd(self._subst(make_cmd))

        def install(self, log):
            def run(dest):
                cmd = [arg % {"destdir": dest} for arg in install_cmd]
                self._build_env.cmd(cmd)
            install_destdir(self._prefix, self._install_dir, run)

    Mod.name = name
    Mod.source = source
    return Mod


ModuleBinutils = Module(
    name="binutils",
    source=binutils_tree,
    configure_cmd=[
        "sh", "-c",
        "%(source_dir)s/configure "
        'CFLAGS="-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
        "--prefix=%(prefix)s "
        "--target=nacl"],
    make_cmd=["make", "-j4"],
    install_cmd=["make", "install", "DESTDIR=%(destdir)s"])

common_gcc_options = (
    "--with-as=`which nacl-as` "  # Experimental
    "--disable-libmudflap "
    "--disable-decimal-float "
    "--disable-libssp "
    "--disable-libstdcxx-pch "
    "--disable-shared "
    "--prefix=%(prefix)s "
    "--target=nacl ")

ModulePregcc = Module(
    name="pregcc",
    source=gcc_tree,
    # CFLAGS has to be passed via environment because the
    # configure script can't cope with spaces otherwise.
    configure_cmd=[
        "sh", "-c",
        "CC=gcc "
        'CFLAGS="-Dinhibit_libc -D__gthr_posix_h '
        '-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
        "%(source_dir)s/configure "
        "--without-headers "
        "--enable-languages=c "
        "--disable-threads "  # pregcc
        + common_gcc_options],
    # The default make target doesn't work - it gives libiberty
    # configure failures. Need to do "all-gcc" instead.
    make_cmd=["make", "all-gcc", "-j2"],
    install_cmd=["make", "install-gcc", "DESTDIR=%(destdir)s"])

ModuleFullgcc = Module(
    name="fullgcc",
    source=gcc_tree,
    # CFLAGS has to be passed via environment because the
    # configure script can't cope with spaces otherwise.
    configure_cmd=[
        "sh", "-c",
        "CC=gcc "
        'CFLAGS="-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
        "%(source_dir)s/configure "
        "--with-newlib "
        "--enable-threads=nacl "
        "--enable-tls "
        "--disable-libgomp "
        '--enable-languages="c,c++" '
        + common_gcc_options],
    make_cmd=["make", "all", "-j2"],
    install_cmd=["make", "install", "DESTDIR=%(destdir)s"])


class ModuleNewlib(ModuleBase):
    name = "newlib"
    source = newlib_tree

    def configure(self, log):
        # This is like exporting the kernel headers to glibc.
        # This should be done differently.
        self._env.cmd(
            [os.path.join(nacl_dir,
                          "src/trusted/service_runtime/export_header.py"),
             os.path.join(nacl_dir, "src/trusted/service_runtime/include"),
             os.path.join(self._source_dir, "newlib/libc/sys/nacl")])
        mkdir_p(self._build_dir)
        # CFLAGS has to be passed via environment because the
        # configure script can't cope with spaces otherwise.
        self._build_env.cmd([
            "sh", "-c",
            'CFLAGS="-m32 -march=i486 -msse2 -mfpmath=sse" '
            "%(source_dir)s/configure "
            "--enable-newlib-io-long-long "
            "--enable-newlib-io-c99-formats "
            "--prefix=%(prefix)s "
            "--target=nacl"
            % self._args])

    def make(self, log):
        self._build_env.cmd(["sh", "-c", "make"])

    def install(self, log):
        install_destdir(
            self._prefix, self._install_dir,
            lambda dest: self._build_env.cmd(["make", "install",
                                              "DESTDIR=%s" % dest]))
```
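The DESTDIR dance in `install_destdir()` is the standard autotools staging trick: `make install DESTDIR=$tmp` populates `$tmp/$prefix`, the prefix component is stripped off to get a relocatable tree, and that tree is finally copied onto the live prefix. A self-contained sketch of the same flow — the paths are hypothetical, and `fake_make_install` stands in for a module's real install command:

```python
import os

def _example_install_destdir():
    prefix = "/tmp/demo-prefix"        # hypothetical --prefix value
    install_dir = "/tmp/demo-install"  # per-module install tree

    def fake_make_install(destdir):
        # Stand-in for 'make install DESTDIR=...', which writes into
        # $DESTDIR/$prefix rather than into $prefix directly.
        staged = os.path.join(destdir, prefix.lstrip("/"))
        os.makedirs(staged)
        open(os.path.join(staged, "marker"), "w").close()

    install_destdir(prefix, install_dir, fake_make_install)
    # install_dir now holds exactly what was staged under $DESTDIR/$prefix,
    # and the same tree has been copied onto the live prefix.
    assert os.path.exists(os.path.join(install_dir, "marker"))
    assert os.path.exists(os.path.join(prefix, "marker"))
```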
build.py | env.cmd(["tar", "-C", dest_dir, "-xf", self._tar_path])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
# This handles gcc, where two source tarballs must be unpacked on top
# of each other.
class MultiTarballTree(DirTree):
def __init__(self, tar_paths):
self._tar_paths = tar_paths
def write_tree(self, env, dest_dir):
assert os.listdir(dest_dir) == []
for tar_file in self._tar_paths:
env.cmd(["tar", "-C", dest_dir, "-xf", tar_file])
tar_name = get_one(os.listdir(dest_dir))
for leafname in os.listdir(os.path.join(dest_dir, tar_name)):
os.rename(os.path.join(dest_dir, tar_name, leafname),
os.path.join(dest_dir, leafname))
os.rmdir(os.path.join(dest_dir, tar_name))
class PatchedTree(DirTree):
def __init__(self, orig_tree, patch_file):
self._orig_tree = orig_tree
self._patch_file = patch_file
def write_tree(self, env, dest_dir):
self._orig_tree.write_tree(env, dest_dir)
env.cmd(["patch", "-d", dest_dir, "-p1", "-i", self._patch_file])
class EnvVarEnv(object):
def __init__(self, envvars, env):
self._envvars = envvars
self._env = env
def cmd(self, args, **kwargs):
return self._env.cmd(
["env"] + ["%s=%s" % (key, value) for key, value in self._envvars]
+ args, **kwargs)
class ModuleBase(object):
def __init__(self, source_dir, build_dir, prefix, install_dir, env_vars):
self._env = cmd_env.VerboseWrapper(cmd_env.BasicEnv())
self._source_dir = source_dir
self._build_dir = build_dir
self._prefix = prefix
self._install_dir = install_dir
self._build_env = cmd_env.PrefixCmdEnv(
cmd_env.in_dir(self._build_dir), EnvVarEnv(env_vars, self._env))
self._args = {"prefix": self._prefix,
"source_dir": self._source_dir}
def all(self):
return action_tree.make_node(
[self.unpack, self.configure, self.make, self.install], self.name)
def unpack(self, log):
if not os.path.exists(self._source_dir):
temp_dir = "%s.temp" % self._source_dir
os.makedirs(temp_dir)
self.source.write_tree(self._env, temp_dir)
os.rename(temp_dir, self._source_dir)
def remove_tree(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
def copy_onto(source_dir, dest_dir):
for leafname in os.listdir(source_dir):
subprocess.check_call(["cp", "-a", os.path.join(source_dir, leafname),
"-t", dest_dir])
def install_destdir(prefix_dir, install_dir, func):
temp_dir = "%s.tmp" % install_dir
remove_tree(temp_dir)
func(temp_dir)
remove_tree(install_dir)
# Tree is installed into $DESTDIR/$prefix.
# We need to strip $prefix.
assert prefix_dir.startswith("/")
os.rename(os.path.join(temp_dir, prefix_dir.lstrip("/")), install_dir)
# TODO: assert that temp_dir doesn't contain anything except prefix dirs
remove_tree(temp_dir)
mkdir_p(prefix_dir)
copy_onto(install_dir, prefix_dir)
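# Usage sketch (paths invented for illustration): stage "make install" into a
# scratch DESTDIR, then adopt the prefix-stripped tree as the install dir:
#   install_destdir("/opt/nacl", "out/binutils-install",
#                   lambda dest: env.cmd(["make", "install", "DESTDIR=%s" % dest]))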
binutils_tree = PatchedTree(TarballTree(find_file("binutils-2.20.tar.bz2")),
find_file("binutils-2.20.patch"))
# TODO: Need to glob for multiple patch files
gcc_tree = PatchedTree(MultiTarballTree(
[find_file("gcc-core-4.2.2.tar.bz2"),
find_file("gcc-g++-4.2.2.tar.bz2")]),
find_file("000-gcc-4.2.2.patch"))
newlib_tree = PatchedTree(TarballTree(find_file("newlib-1.17.0.tar.gz")),
find_file("newlib-1.17.0.patch"))
def Module(name, source, configure_cmd, make_cmd, install_cmd):
# TODO: this nested class is ugly
class Mod(ModuleBase):
# These assignments don't work because of Python's odd scoping rules:
# name = name
# source = source
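        # (Because "name" is assigned in the class body it becomes class-local,
        # and the right-hand lookup skips the enclosing Module() scope; hence
        # the attributes are attached to Mod after the class statement.)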
def _subst(self, cmd):
return [arg % self._args for arg in cmd]
def configure(self, log):
mkdir_p(self._build_dir)
self._build_env.cmd(self._subst(configure_cmd))
def make(self, log):
self._build_env.cmd(self._subst(make_cmd))
def install(self, log):
def run(dest):
cmd = [arg % {"destdir": dest} for arg in install_cmd]
self._build_env.cmd(cmd)
install_destdir(self._prefix, self._install_dir, run)
Mod.name = name
Mod.source = source
return Mod
ModuleBinutils = Module(
name="binutils",
source=binutils_tree,
configure_cmd=[
"sh", "-c",
"%(source_dir)s/configure "
'CFLAGS="-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"--prefix=%(prefix)s "
"--target=nacl"],
make_cmd=["make", "-j4"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
common_gcc_options = (
"--with-as=`which nacl-as` " # Experimental
"--disable-libmudflap "
"--disable-decimal-float "
"--disable-libssp "
"--disable-libstdcxx-pch "
"--disable-shared "
"--prefix=%(prefix)s "
"--target=nacl ")
ModulePregcc = Module(
name="pregcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -D__gthr_posix_h '
'-DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--without-headers "
"--enable-languages=c "
"--disable-threads " # pregcc
+ common_gcc_options],
# The default make target doesn't work - it gives libiberty
# configure failures. Need to do "all-gcc" instead.
make_cmd=["make", "all-gcc", "-j2"],
install_cmd=["make", "install-gcc", "DESTDIR=%(destdir)s"])
ModuleFullgcc = Module(
name="fullgcc",
source=gcc_tree,
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
configure_cmd=[
"sh", "-c",
"CC=gcc "
'CFLAGS="-Dinhibit_libc -DNACL_ALIGN_BYTES=32 -DNACL_ALIGN_POW2=5" '
"%(source_dir)s/configure "
"--with-newlib "
"--enable-threads=nacl "
"--enable-tls "
"--disable-libgomp "
'--enable-languages="c,c++" '
+ common_gcc_options],
make_cmd=["make", "all", "-j2"],
install_cmd=["make", "install", "DESTDIR=%(destdir)s"])
class ModuleNewlib(ModuleBase):
name = "newlib"
source = newlib_tree
def configure(self, log):
# This is like exporting the kernel headers to glibc.
# This should be done differently.
self._env.cmd(
[os.path.join(nacl_dir,
"src/trusted/service_runtime/export_header.py"),
os.path.join(nacl_dir, "src/trusted/service_runtime/include"),
os.path.join(self._source_dir, "newlib/libc/sys/nacl")])
mkdir_p(self._build_dir)
# CFLAGS has to be passed via environment because the
# configure script can't cope with spaces otherwise.
self._build_env.cmd([
"sh", "-c",
'CFLAGS="-m32 -march=i486 -msse2 -mfpmath=sse" '
"%(source_dir)s/configure "
"--enable-newlib-io-long-long "
"--enable-newlib-io-c99-formats "
"--prefix=%(prefix)s "
"--target=nacl"
% self._args])
def make(self, log):
self._build_env.cmd(["sh", "-c", "make"])
def install(self, log):
install_destdir(
self._prefix, self._install_dir,
lambda dest: self._build_env.cmd(["make", "install",
"DESTDIR=%s" | install | identifier_name |
class ModuleNcthreads(ModuleBase):
name = "nc_threads"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "install_libpthread",
"USE_PATH=1",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnaclHeaders(ModuleBase):
name = "libnacl_headers"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update_header",
"USE_PATH=1",
"nocpp=yes",
"naclsdk_mode=custom:%s" %
os.path.join(dest, self._prefix.lstrip("/")),
"naclsdk_validate=0",
"--verbose"])
install_destdir(self._prefix, self._install_dir, do_make)
class ModuleLibnacl(ModuleBase):
# Covers libnacl.a, crt[1ni].o and misc libraries built with Scons.
| name = "libnacl"
source = EmptyTree()
def configure(self, log):
pass
def make(self, log):
pass
def install(self, log):
mkdir_p(self._build_dir)
# This requires scons to pass PATH through so that it can run
# nacl-gcc. We set naclsdk_mode to point to an empty
# directory so it can't get nacl-gcc from there. However, if
# scons-out is already populated, scons won't try to run
# nacl-gcc.
def do_make(dest):
self._build_env.cmd(
cmd_env.in_dir(nacl_dir) +
["./scons", "MODE=nacl_extra_sdk", "extra_sdk_update", | identifier_body |
|
response.rs
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use std::error::Error;
self.description().fmt(f)
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match self.message {
None => "<no description available>",
Some(ref message) => &message
}
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl From<Status> for Error {
fn from(status: Status) -> Error {
Error::new(status, None)
}
}
impl From<(Status, &'static str)> for Error {
fn from(pair: (Status, &'static str)) -> Error {
Error::new(pair.0, Some(Cow::Borrowed(pair.1)))
}
}
impl From<(Status, String)> for Error {
fn from(pair: (Status, String)) -> Error {
Error::new(pair.0, Some(Cow::Owned(pair.1)))
}
}
/// Defines the action to be taken when returning from a handler
pub enum Action {
/// Ends the response with no body and the given status (if given).
///
/// If the status is not given, the status currently set on the response is used.
/// By default, a response has a status 200 OK.
End(Option<Status>),
/// Redirects to the given URL with a 3xx status (use 302 Found if unsure).
Redirect(Status, String),
/// Renders the template with the given name using the given JSON value.
///
/// If no Content-Type header is set, the content type is set to `text/html`.
Render(String, json::Value),
/// Sends the response with the given bytes as the body.
Send(Vec<u8>),
/// Returns a closure that is called with a Stream argument.
Stream(Box<Fn(&mut Any, &mut Write)>),
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
/// If the file does not exist, this method sends a 404 Not Found response.
SendFile(String)
}
/// Conversion from `()` into `End(None)`.
impl From<()> for Action {
fn from(_: ()) -> Action {
Action::End(None)
}
}
/// Conversion from `Status` into `End(Some(status))`.
impl From<Status> for Action {
fn from(status: Status) -> Action {
Action::End(Some(status))
}
}
/// Conversion from `(Status, &str)` into `Action::Redirect(status, url)`.
impl<'a> From<(Status, &'a str)> for Action {
fn from(pair: (Status, &'a str)) -> Action {
Action::Redirect(pair.0, pair.1.to_string())
}
}
/// Conversion from `(Status, String)` into `Action::Redirect(status, url)`.
impl From<(Status, String)> for Action {
fn from(pair: (Status, String)) -> Action {
From::from((pair.0, pair.1.as_str()))
}
}
/// Conversion from `(&str, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<'a, T> From<(&'a str, T)> for Action where T: ToJson {
fn from(pair: (&'a str, T)) -> Action {
Action::Render(pair.0.to_string(), pair.1.to_json())
}
}
/// Conversion from `(String, T)`, where `T` can be converted to a JSON value,
/// into `Action::Render(template_name, json)`.
impl<T> From<(String, T)> for Action where T: ToJson {
fn from(pair: (String, T)) -> Action {
Action::Render(pair.0, pair.1.to_json())
}
}
/// Conversion from `Vec<u8>` into `Action::Send(bytes)`.
impl From<Vec<u8>> for Action {
fn from(bytes: Vec<u8>) -> Action {
Action::Send(bytes)
}
}
/// Conversion from `&str` into `Action::Send(bytes)`.
impl<'a> From<&'a str> for Action {
fn from(string: &'a str) -> Action {
Action::Send(string.as_bytes().to_vec())
}
}
/// Conversion from `String` into `Action::Send(bytes)`.
impl From<String> for Action {
fn from(string: String) -> Action {
Action::Send(string.into_bytes())
}
}
/// Conversion from `json::Value` into `Action::Send(bytes)`.
impl From<json::Value> for Action {
fn from(json: json::Value) -> Action {
From::from(json.to_string())
}
}
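// Taken together, these From impls let a handler finish with `value.into()`:
// `Ok(().into())` ends the response with the current status,
// `Ok((Status::Found, "/login").into())` issues a redirect, and
// `Ok("hello".into())` sends the string as the body.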
/// Wraps the given closure in a box and returns `Ok(Action::Stream(box))`.
///
/// The closure will be called with a writer implementing the `Write` trait
/// so that each call to `write` notifies the handler that data can be written
/// to the HTTP transport.
pub fn stream<F, T, R>(closure: F) -> Result where T: Any, F: 'static + Fn(&mut T, &mut Write) -> io::Result<R> {
Ok(Action::Stream(Box::new(move |any, writer| {
if let Some(app) = any.downcast_mut::<T>() {
if let Err(e) = closure(app, writer) {
error!("{}", e);
}
}
})))
}
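// A minimal usage sketch (the application type and its chunk source are
// invented for illustration, not part of this crate):
//
//     stream(|app: &mut MyApp, writer: &mut Write| {
//         writer.write_all(&app.next_chunk())
//     })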
/// This represents the response that will be sent back to the application.
///
/// Includes a status code (default 200 OK), headers, and a body.
/// The response can be updated and sent back immediately in a synchronous way,
/// or deferred pending some computation (asynchronous mode).
///
/// The response is sent when it is dropped.
pub struct Response {
pub status: Status,
pub headers: Headers,
streaming: bool
}
impl Response {
pub fn new() -> Response {
Response {
status: Status::Ok,
headers: Headers::default(),
streaming: false
}
}
/// Sets the status code of this response.
pub fn status(&mut self, status: Status) -> &mut Self {
self.status = status;
self
}
/// Sets the Content-Type header.
pub fn content_type<S: Into<Vec<u8>>>(&mut self, mime: S) -> &mut Self {
self.headers.set_raw("Content-Type", vec![mime.into()]);
self
}
/// Sets the Content-Length header.
pub fn len(&mut self, len: u64) -> &mut Self {
self.headers.set(header::ContentLength(len));
self
}
/// Sets the given cookie.
pub fn cookie(&mut self, cookie: Cookie) {
if self.headers.has::<SetCookie>() {
self.headers.get_mut::<SetCookie>().unwrap().push(cookie)
} else {
self.headers.set(SetCookie(vec![cookie]))
}
}
/// Sets the given header.
pub fn header<H: Header>(&mut self, header: H) -> &mut Self {
self.headers.set(header);
self
}
/// Sets the given header with raw strings.
pub fn header_raw<K: Into<Cow<'static, str>> + fmt::Debug, V: Into<Vec<u8>>>(&mut self, name: K, value: V) -> &mut Self {
self.headers.set_raw(name, vec![value.into()]);
self
}
/// Sets the Location header.
pub fn location<S: Into<String>>(&mut self, url: S) -> &mut Self {
self.headers.set(header::Location(url.into()));
self
}
/// Sends the given file, setting the Content-Type based on the file's extension.
///
/// Known extensions are:
/// - application: js, m3u8, mpd, xml
/// - image: gif, jpg, jpeg, png
/// - text: css, htm, html, txt
/// - video: avi, mp4, mpg, mpeg, ts
/// If the file does not exist, this method sends a 404 Not Found response.
fn send_file<P: AsRef<Path>>(&mut self, path: P) -> Option<Vec<u8>> {
if !self.headers.has::<ContentType>() {
let extension = path.as_ref().extension();
if let Some(ext) = extension {
let content_type = match ext.to_string_lossy().as_ref() {
// application
"js" => Some(("application", "javascript", None)),
"m3u8" => Some(("application", "vnd.apple.mpegurl", None)),
"mpd" => Some(("application", "dash+xml", None)),
"xml" => Some(("application", "xml", None)),
// image
"gif" => Some(("image", "gif", None)),
"jpg" | "jpeg" => Some(("image", "jpeg", None)),
"png" => Some(("image", "png", | cause | identifier_name |
// text
"css" => Some(("text", "css", None)),
"htm" | "html" => Some(("text", "html", Some((Attr::Charset, Value::Utf8)))),
"txt" => Some(("text", "plain", Some((Attr::Charset, Value::Utf8)))),
// video
"avi" => Some(("video", "x-msvideo", None)),
"mp4" => Some(("video", "mp4", None)),
"mpg" | "mpeg" => Some(("video", "mpeg", None)),
"ts" => Some(("video", "mp2t", None)),
_ => None
};
if let Some((top, sub, attr)) = content_type {
self.headers.set(ContentType(Mime(TopLevel::Ext(top.to_string()),
SubLevel::Ext(sub.to_string()),
match attr {
None => vec![],
Some(val) => vec![val]
}
)));
}
}
}
// read the whole file at once and send it
// probably not the best idea for big files, we should use stream instead in that case
match File::open(path) {
Ok(mut file) => {
let mut buf = Vec::with_capacity(file.metadata().ok().map_or(1024, |meta| meta.len() as usize));
if let Err(err) = file.read_to_end(&mut buf) {
self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
} else {
Some(buf)
}
},
Err(ref err) if err.kind() == ErrorKind::NotFound => {
self.status(Status::NotFound);
None
},
Err(ref err) => {
self.status(Status::InternalServerError).content_type("text/plain");
Some(format!("{}", err).into())
}
}
}
}
pub fn send_file<P: AsRef<Path>>(response: &mut Response, path: P) -> Option<Vec<u8>> {
response.send_file(path)
}
pub fn set_streaming(response: &mut Response) {
response.streaming = true;
}
pub fn is_streaming(response: &Response) -> bool {
    response.streaming
}
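// The free functions above re-export the private `send_file` method and the
// private `streaming` flag, presumably so sibling modules in the crate can
// reach them without the struct exposing public mutators.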
node.go
"/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
config config.Config
kubernetesVersion string
containerRuntime string
advertiseAddress string
apiServerHostPort string
kubeadmToken string
caCertHash string
ResetOnFailure bool
podNetworkCIDR string
cloudProvider string
nodepool string
azureTenantID string
azureSubnetName string
azureSecurityGroupName string
azureVNetName string
azureVNetResourceGroup string
azureVMType string
azureLoadBalancerSku string
azureRouteTableName string
taints []string
labels []string
}
func NewCommand(config config.Config) *cobra.Command {
return phases.NewCommand(&Node{config: config})
}
func (n *Node) Use() string {
return use
}
func (n *Node) Short() string {
	return short
}
func (n *Node) RegisterFlags(flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.String(constants.FlagPodNetworkCIDR, "", "range of IP addresses for the pod network on the current node")
// Pipeline
flags.StringP(constants.FlagPipelineAPIEndpoint, constants.FlagPipelineAPIEndpointShort, "", "Pipeline API server url")
flags.StringP(constants.FlagPipelineAPIToken, constants.FlagPipelineAPITokenShort, "", "Token for accessing Pipeline API")
flags.Bool(constants.FlagPipelineAPIInsecure, false, "If the Pipeline API should not verify the API's certificate")
flags.Int32(constants.FlagPipelineOrganizationID, 0, "Organization ID to use with Pipeline API")
flags.Int32(constants.FlagPipelineClusterID, 0, "Cluster ID to use with Pipeline API")
// Kubernetes cloud provider (optional)
flags.String(constants.FlagCloudProvider, "", "cloud provider. example: aws")
// Control Plane
flags.String(constants.FlagAdvertiseAddress, "", "Kubernetes API Server advertise address")
_ = flags.MarkHidden(constants.FlagAdvertiseAddress)
// Kubernetes cluster join parameters
flags.String(constants.FlagAPIServerHostPort, "", "Kubernetes API Server host port")
flags.String(constants.FlagKubeadmToken, "", "PKE join token")
flags.String(constants.FlagCACertHash, "", "CA cert hash")
flags.Bool(constants.FlagResetOnFailure, false, "Roll back changes after failures")
// Pipeline nodepool name (optional)
flags.String(constants.FlagPipelineNodepool, "", "name of the nodepool the node belongs to")
// Azure cloud
flags.String(constants.FlagAzureTenantID, "", "The AAD Tenant ID for the Subscription that the cluster is deployed in")
flags.String(constants.FlagAzureSubnetName, "", "The name of the subnet that the cluster is deployed in")
flags.String(constants.FlagAzureSecurityGroupName, "", "The name of the security group attached to the cluster's subnet")
flags.String(constants.FlagAzureVNetName, "", "The name of the VNet that the cluster is deployed in")
flags.String(constants.FlagAzureVNetResourceGroup, "", "The name of the resource group that the Vnet is deployed in")
flags.String(constants.FlagAzureVMType, "standard", "The type of azure nodes. Candidate values are: vmss and standard")
flags.String(constants.FlagAzureLoadBalancerSku, "basic", "Sku of Load Balancer and Public IP. Candidate values are: basic and standard")
flags.String(constants.FlagAzureRouteTableName, "kubernetes-routes", "The name of the route table attached to the subnet that the cluster is deployed in")
// Taints
flags.StringSlice(constants.FlagTaints, nil, "Specifies the taints the Node should be registered with")
// Labels
flags.StringSlice(constants.FlagLabels, nil, "Specifies the labels the Node should be registered with")
}
func (n *Node) Validate(cmd *cobra.Command) error {
if err := n.workerBootstrapParameters(cmd); err != nil {
return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: n.caCertHash,
}); err != nil {
return err
}
// Azure specific required flags
if n.cloudProvider == constants.CloudProviderAzure {
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagAzureTenantID: n.azureTenantID,
constants.FlagAzureSubnetName: n.azureSubnetName,
constants.FlagAzureSecurityGroupName: n.azureSecurityGroupName,
constants.FlagAzureVNetName: n.azureVNetName,
constants.FlagAzureVNetResourceGroup: n.azureVNetResourceGroup,
constants.FlagAzureVMType: n.azureVMType,
constants.FlagAzureLoadBalancerSku: n.azureLoadBalancerSku,
constants.FlagAzureRouteTableName: n.azureRouteTableName,
}); err != nil {
return err
}
}
switch n.containerRuntime {
case constants.ContainerRuntimeContainerd,
constants.ContainerRuntimeDocker:
// break
default:
return errors.Wrapf(constants.ErrUnsupportedContainerRuntime, "container runtime: %s", n.containerRuntime)
}
flags.PrintFlags(cmd.OutOrStdout(), n.Use(), cmd.Flags())
return nil
}
func (n *Node) Run(out io.Writer) error {
_, _ = fmt.Fprintf(out, "[%s] running\n", n.Use())
if err := n.install(out); err != nil {
if n.ResetOnFailure {
if rErr := kubeadm.Reset(out, n.containerRuntime); rErr != nil {
_, _ = fmt.Fprintf(out, "%v\n", rErr)
}
}
return err
}
return nil
}
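// Run performs its own rollback: when ResetOnFailure is set, a failed install
// is followed by kubeadm.Reset for the configured container runtime before the
// original error is returned to the caller.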
func (n *Node) workerBootstrapParameters(cmd *cobra.Command) (err error) {
n.kubernetesVersion, err = cmd.Flags().GetString(constants.FlagKubernetesVersion)
if err != nil {
return
}
n.containerRuntime, err = cmd.Flags().GetString(constants.FlagContainerRuntime)
if err != nil {
return
}
// Override values with flags
n.advertiseAddress, err = cmd.Flags().GetString(constants.FlagAdvertiseAddress)
if err != nil {
return
}
n.apiServerHostPort, err = cmd.Flags().GetString(constants.FlagAPIServerHostPort)
if err != nil {
return
}
n.kubeadmToken, err = cmd.Flags().GetString(constants.FlagKubeadmToken)
if err != nil {
return
}
n.caCertHash, err = cmd.Flags().GetString(constants.FlagCACertHash)
if err != nil {
return
}
n.ResetOnFailure, err = cmd.Flags().GetBool(constants.FlagResetOnFailure)
if err != nil {
return
}
if n.kubeadmToken == "" && n.caCertHash == "" {
n.apiServerHostPort, n.kubeadmToken, n.caCertHash, err = pipelineutil.NodeJoinArgs(os.Stdout, cmd)
if err != nil {
return
}
}
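	// Falling back to Pipeline (pipelineutil.NodeJoinArgs) only when both the
	// token and the CA cert hash are absent keeps explicitly supplied join
	// credentials authoritative.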
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.azureTenantID, err = cmd.Flags().GetString(constants.FlagAzureTenantID)
if err != nil {
return
}
n.azureSubnetName, err = cmd.Flags().GetString(constants.FlagAzureSubnetName)
if err != nil {
return
}
n.azureSecurityGroupName, err = cmd.Flags().GetString(constants.FlagAzureSecurityGroupName)
if err != nil {
return
}
n.azureVNetName, err = cmd.Flags().GetString(constants.FlagAzureVNetName)
if err != nil {
return
}
n.azureVNetResourceGroup, err = cmd.Flags().GetString(constants.FlagAzureVNetResourceGroup)
if err != nil {
return
}
n.azureVMType, err = cmd.Flags().GetString(constants.FlagAzureVMType)
if err != nil {
return
}
n.azureLoadBalancerSku, err = cmd.Flags().GetString(constants.FlagAzureLoadBalancerSku)
if err != nil {
return
}
n.azureRouteTableName, err = cmd.Flags().GetString(constants.FlagAzureRouteTableName)
if err != nil {
return
}
n.taints, err = cmd.Flags().GetStringSlice(constants.FlagTaints)
if err != nil {
return
}
n.labels, err = cmd.Flags().GetStringSlice(constants.FlagLabels)
return
}
node.go | etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
config config.Config
kubernetesVersion string
containerRuntime string
advertiseAddress string
apiServerHostPort string
kubeadmToken string
caCertHash string
ResetOnFailure bool
podNetworkCIDR string
cloudProvider string
nodepool string
azureTenantID string
azureSubnetName string
azureSecurityGroupName string
azureVNetName string
azureVNetResourceGroup string
azureVMType string
azureLoadBalancerSku string
azureRouteTableName string
taints []string
labels []string
}
func NewCommand(config config.Config) *cobra.Command {
return phases.NewCommand(&Node{config: config})
}
func (n *Node) Use() string {
return use
}
func (n *Node) Short() string {
return short
}
func (n *Node) R | flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.String(constants.FlagPodNetworkCIDR, "", "range of IP addresses for the pod network on the current node")
// Pipeline
flags.StringP(constants.FlagPipelineAPIEndpoint, constants.FlagPipelineAPIEndpointShort, "", "Pipeline API server url")
flags.StringP(constants.FlagPipelineAPIToken, constants.FlagPipelineAPITokenShort, "", "Token for accessing Pipeline API")
flags.Bool(constants.FlagPipelineAPIInsecure, false, "If the Pipeline API should not verify the API's certificate")
flags.Int32(constants.FlagPipelineOrganizationID, 0, "Organization ID to use with Pipeline API")
flags.Int32(constants.FlagPipelineClusterID, 0, "Cluster ID to use with Pipeline API")
// Kubernetes cloud provider (optional)
flags.String(constants.FlagCloudProvider, "", "cloud provider. example: aws")
// Control Plane
flags.String(constants.FlagAdvertiseAddress, "", "Kubernetes API Server advertise address")
_ = flags.MarkHidden(constants.FlagAdvertiseAddress)
// Kubernetes cluster join parameters
flags.String(constants.FlagAPIServerHostPort, "", "Kubernetes API Server host port")
flags.String(constants.FlagKubeadmToken, "", "PKE join token")
flags.String(constants.FlagCACertHash, "", "CA cert hash")
flags.Bool(constants.FlagResetOnFailure, false, "Roll back changes after failures")
// Pipeline nodepool name (optional)
flags.String(constants.FlagPipelineNodepool, "", "name of the nodepool the node belongs to")
// Azure cloud
flags.String(constants.FlagAzureTenantID, "", "The AAD Tenant ID for the Subscription that the cluster is deployed in")
flags.String(constants.FlagAzureSubnetName, "", "The name of the subnet that the cluster is deployed in")
flags.String(constants.FlagAzureSecurityGroupName, "", "The name of the security group attached to the cluster's subnet")
flags.String(constants.FlagAzureVNetName, "", "The name of the VNet that the cluster is deployed in")
flags.String(constants.FlagAzureVNetResourceGroup, "", "The name of the resource group that the Vnet is deployed in")
flags.String(constants.FlagAzureVMType, "standard", "The type of azure nodes. Candidate values are: vmss and standard")
flags.String(constants.FlagAzureLoadBalancerSku, "basic", "Sku of Load Balancer and Public IP. Candidate values are: basic and standard")
flags.String(constants.FlagAzureRouteTableName, "kubernetes-routes", "The name of the route table attached to the subnet that the cluster is deployed in")
// Taints
flags.StringSlice(constants.FlagTaints, nil, "Specifies the taints the Node should be registered with")
// Labels
flags.StringSlice(constants.FlagLabels, nil, "Specifies the labels the Node should be registered with")
}
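// A hypothetical invocation of the resulting worker-node command, for
// illustration only (the binary name and every value below are made up,
// not taken from this repository):
//
//	pke-worker node \
//		--kubernetes-version=1.21.0 \
//		--apiserver-host-port=192.168.1.10:6443 \
//		--kubeadm-token=abcdef.0123456789abcdef \
//		--ca-cert-hash=sha256:0123abcd...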
func (n *Node) Validate(cmd *cobra.Command) error {
if err := n.workerBootstrapParameters(cmd); err != nil {
return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: n.caCertHash,
}); err != nil {
return err
}
// Azure specific required flags
if n.cloudProvider == constants.CloudProviderAzure {
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagAzureTenantID: n.azureTenantID,
constants.FlagAzureSubnetName: n.azureSubnetName,
constants.FlagAzureSecurityGroupName: n.azureSecurityGroupName,
constants.FlagAzureVNetName: n.azureVNetName,
constants.FlagAzureVNetResourceGroup: n.azureVNetResourceGroup,
constants.FlagAzureVMType: n.azureVMType,
constants.FlagAzureLoadBalancerSku: n.azureLoadBalancerSku,
constants.FlagAzureRouteTableName: n.azureRouteTableName,
}); err != nil {
return err
}
}
switch n.containerRuntime {
case constants.ContainerRuntimeContainerd,
constants.ContainerRuntimeDocker:
// break
default:
return errors.Wrapf(constants.ErrUnsupportedContainerRuntime, "container runtime: %s", n.containerRuntime)
}
flags.PrintFlags(cmd.OutOrStdout(), n.Use(), cmd.Flags())
return nil
}
func (n *Node) Run(out io.Writer) error {
_, _ = fmt.Fprintf(out, "[%s] running\n", n.Use())
if err := n.install(out); err != nil {
if n.ResetOnFailure {
if rErr := kubeadm.Reset(out, n.containerRuntime); rErr != nil {
_, _ = fmt.Fprintf(out, "%v\n", rErr)
}
}
return err
}
return nil
}
func (n *Node) workerBootstrapParameters(cmd *cobra.Command) (err error) {
n.kubernetesVersion, err = cmd.Flags().GetString(constants.FlagKubernetesVersion)
if err != nil {
return
}
n.containerRuntime, err = cmd.Flags().GetString(constants.FlagContainerRuntime)
if err != nil {
return
}
// Override values with flags
n.advertiseAddress, err = cmd.Flags().GetString(constants.FlagAdvertiseAddress)
if err != nil {
return
}
n.apiServerHostPort, err = cmd.Flags().GetString(constants.FlagAPIServerHostPort)
if err != nil {
return
}
n.kubeadmToken, err = cmd.Flags().GetString(constants.FlagKubeadmToken)
if err != nil {
return
}
n.caCertHash, err = cmd.Flags().GetString(constants.FlagCACertHash)
if err != nil {
return
}
n.ResetOnFailure, err = cmd.Flags().GetBool(constants.FlagResetOnFailure)
if err != nil {
return
}
if n.kubeadmToken == "" && n.caCertHash == "" {
n.apiServerHostPort, n.kubeadmToken, n.caCertHash, err = pipelineutil.NodeJoinArgs(os.Stdout, cmd)
if err != nil {
return
}
}
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.azureTenantID, err = cmd.Flags().GetString(constants.FlagAzureTenantID)
if err != nil {
return
}
n.azureSubnetName, err = cmd.Flags().GetString(constants.FlagAzureSubnetName)
if err != nil {
return
}
n.azureSecurityGroupName, err = cmd.Flags().GetString(constants.FlagAzureSecurityGroupName)
if err != nil {
return
}
n.azureVNetName, err = cmd.Flags().GetString(constants.FlagAzureVNetName)
if err != nil {
return
}
n.azureVNetResourceGroup, err = cmd.Flags().GetString(constants.FlagAzureVNetResourceGroup)
if err != nil {
return
}
n.azureVMType, err = cmd.Flags().GetString(constants.FlagAzureVMType)
if err != nil {
return
}
n.azureLoadBalancerSku, err = cmd.Flags().GetString(constants.FlagAzureLoadBalancerSku)
if err != nil {
return
}
n.azureRouteTableName, err = cmd.Flags().GetString(constants.FlagAzureRouteTableName)
if err != nil {
return
}
n.taints, err = cmd.Flags().GetStringSlice(constants.FlagTaints)
if err != nil {
return
}
n.labels, err = cmd.Flags().GetStringSlice(constants.FlagLabels)
return
}
func (n *Node | egisterFlags( | identifier_name |
node.go | "/etc/kubernetes/aws.conf"
kubeadmAzureConfig = "/etc/kubernetes/azure.conf"
cniDir = "/etc/cni/net.d"
cniBridgeConfig = "/etc/cni/net.d/10-bridge.conf"
cniLoopbackConfig = "/etc/cni/net.d/99-loopback.conf"
maxJoinRetries = 5
)
var _ phases.Runnable = (*Node)(nil)
type Node struct {
config config.Config
kubernetesVersion string
containerRuntime string
advertiseAddress string
apiServerHostPort string
kubeadmToken string
caCertHash string
ResetOnFailure bool
podNetworkCIDR string
cloudProvider string
nodepool string
azureTenantID string
azureSubnetName string
azureSecurityGroupName string
azureVNetName string
azureVNetResourceGroup string
azureVMType string
azureLoadBalancerSku string
azureRouteTableName string
taints []string
labels []string
}
func NewCommand(config config.Config) *cobra.Command {
return phases.NewCommand(&Node{config: config})
}
func (n *Node) Use() string {
return use
}
func (n *Node) Short() string {
return short
}
func (n *Node) RegisterFlags(flags *pflag.FlagSet) {
// Kubernetes version
flags.String(constants.FlagKubernetesVersion, n.config.Kubernetes.Version, "Kubernetes version")
// Kubernetes container runtime
flags.String(constants.FlagContainerRuntime, n.config.ContainerRuntime.Type, "Kubernetes container runtime")
// Kubernetes network
flags.String(constants.FlagPodNetworkCIDR, "", "range of IP addresses for the pod network on the current node")
// Pipeline
flags.StringP(constants.FlagPipelineAPIEndpoint, constants.FlagPipelineAPIEndpointShort, "", "Pipeline API server url")
flags.StringP(constants.FlagPipelineAPIToken, constants.FlagPipelineAPITokenShort, "", "Token for accessing Pipeline API")
flags.Bool(constants.FlagPipelineAPIInsecure, false, "If the Pipeline API should not verify the API's certificate")
flags.Int32(constants.FlagPipelineOrganizationID, 0, "Organization ID to use with Pipeline API")
flags.Int32(constants.FlagPipelineClusterID, 0, "Cluster ID to use with Pipeline API")
// Kubernetes cloud provider (optional)
flags.String(constants.FlagCloudProvider, "", "cloud provider. example: aws")
// Control Plane
flags.String(constants.FlagAdvertiseAddress, "", "Kubernetes API Server advertise address")
_ = flags.MarkHidden(constants.FlagAdvertiseAddress)
// Kubernetes cluster join parameters
flags.String(constants.FlagAPIServerHostPort, "", "Kubernetes API Server host port")
flags.String(constants.FlagKubeadmToken, "", "PKE join token")
flags.String(constants.FlagCACertHash, "", "CA cert hash")
flags.Bool(constants.FlagResetOnFailure, false, "Roll back changes after failures")
// Pipeline nodepool name (optional)
flags.String(constants.FlagPipelineNodepool, "", "name of the nodepool the node belongs to")
// Azure cloud
flags.String(constants.FlagAzureTenantID, "", "The AAD Tenant ID for the Subscription that the cluster is deployed in")
flags.String(constants.FlagAzureSubnetName, "", "The name of the subnet that the cluster is deployed in")
flags.String(constants.FlagAzureSecurityGroupName, "", "The name of the security group attached to the cluster's subnet")
flags.String(constants.FlagAzureVNetName, "", "The name of the VNet that the cluster is deployed in")
flags.String(constants.FlagAzureVNetResourceGroup, "", "The name of the resource group that the VNet is deployed in")
flags.String(constants.FlagAzureVMType, "standard", "The type of Azure nodes. Candidate values are: vmss and standard")
flags.String(constants.FlagAzureLoadBalancerSku, "basic", "Sku of Load Balancer and Public IP. Candidate values are: basic and standard")
flags.String(constants.FlagAzureRouteTableName, "kubernetes-routes", "The name of the route table attached to the subnet that the cluster is deployed in")
// Taints
flags.StringSlice(constants.FlagTaints, nil, "Specifies the taints the Node should be registered with")
// Labels
flags.StringSlice(constants.FlagLabels, nil, "Specifies the labels the Node should be registered with")
}
func (n *Node) Validate(cmd *cobra.Command) error {
if err := n.workerBootstrapParameters(cmd); err != nil { | return err
}
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagKubernetesVersion: n.kubernetesVersion,
constants.FlagContainerRuntime: n.containerRuntime,
constants.FlagAPIServerHostPort: n.apiServerHostPort,
constants.FlagKubeadmToken: n.kubeadmToken,
constants.FlagCACertHash: n.caCertHash,
}); err != nil {
return err
}
// Azure specific required flags
if n.cloudProvider == constants.CloudProviderAzure {
if err := validator.NotEmpty(map[string]interface{}{
constants.FlagAzureTenantID: n.azureTenantID,
constants.FlagAzureSubnetName: n.azureSubnetName,
constants.FlagAzureSecurityGroupName: n.azureSecurityGroupName,
constants.FlagAzureVNetName: n.azureVNetName,
constants.FlagAzureVNetResourceGroup: n.azureVNetResourceGroup,
constants.FlagAzureVMType: n.azureVMType,
constants.FlagAzureLoadBalancerSku: n.azureLoadBalancerSku,
constants.FlagAzureRouteTableName: n.azureRouteTableName,
}); err != nil {
return err
}
}
switch n.containerRuntime {
case constants.ContainerRuntimeContainerd,
constants.ContainerRuntimeDocker:
// break
default:
return errors.Wrapf(constants.ErrUnsupportedContainerRuntime, "container runtime: %s", n.containerRuntime)
}
flags.PrintFlags(cmd.OutOrStdout(), n.Use(), cmd.Flags())
return nil
}
func (n *Node) Run(out io.Writer) error {
_, _ = fmt.Fprintf(out, "[%s] running\n", n.Use())
if err := n.install(out); err != nil {
if n.ResetOnFailure {
if rErr := kubeadm.Reset(out, n.containerRuntime); rErr != nil {
_, _ = fmt.Fprintf(out, "%v\n", rErr)
}
}
return err
}
return nil
}
func (n *Node) workerBootstrapParameters(cmd *cobra.Command) (err error) {
n.kubernetesVersion, err = cmd.Flags().GetString(constants.FlagKubernetesVersion)
if err != nil {
return
}
n.containerRuntime, err = cmd.Flags().GetString(constants.FlagContainerRuntime)
if err != nil {
return
}
// Override values with flags
n.advertiseAddress, err = cmd.Flags().GetString(constants.FlagAdvertiseAddress)
if err != nil {
return
}
n.apiServerHostPort, err = cmd.Flags().GetString(constants.FlagAPIServerHostPort)
if err != nil {
return
}
n.kubeadmToken, err = cmd.Flags().GetString(constants.FlagKubeadmToken)
if err != nil {
return
}
n.caCertHash, err = cmd.Flags().GetString(constants.FlagCACertHash)
if err != nil {
return
}
n.ResetOnFailure, err = cmd.Flags().GetBool(constants.FlagResetOnFailure)
if err != nil {
return
}
if n.kubeadmToken == "" && n.caCertHash == "" {
n.apiServerHostPort, n.kubeadmToken, n.caCertHash, err = pipelineutil.NodeJoinArgs(os.Stdout, cmd)
if err != nil {
return
}
}
n.podNetworkCIDR, err = cmd.Flags().GetString(constants.FlagPodNetworkCIDR)
if err != nil {
return
}
n.cloudProvider, err = cmd.Flags().GetString(constants.FlagCloudProvider)
if err != nil {
return
}
n.nodepool, err = cmd.Flags().GetString(constants.FlagPipelineNodepool)
if err != nil {
return
}
n.azureTenantID, err = cmd.Flags().GetString(constants.FlagAzureTenantID)
if err != nil {
return
}
n.azureSubnetName, err = cmd.Flags().GetString(constants.FlagAzureSubnetName)
if err != nil {
return
}
n.azureSecurityGroupName, err = cmd.Flags().GetString(constants.FlagAzureSecurityGroupName)
if err != nil {
return
}
n.azureVNetName, err = cmd.Flags().GetString(constants.FlagAzureVNetName)
if err != nil {
return
}
n.azureVNetResourceGroup, err = cmd.Flags().GetString(constants.FlagAzureVNetResourceGroup)
if err != nil {
return
}
n.azureVMType, err = cmd.Flags().GetString(constants.FlagAzureVMType)
if err != nil {
return
}
n.azureLoadBalancerSku, err = cmd.Flags().GetString(constants.FlagAzureLoadBalancerSku)
if err != nil {
return
}
n.azureRouteTableName, err = cmd.Flags().GetString(constants.FlagAzureRouteTableName)
if err != nil {
return
}
n.taints, err = cmd.Flags().GetStringSlice(constants.FlagTaints)
if err != nil {
return
}
n.labels, err = cmd.Flags().GetStringSlice(constants.FlagLabels)
return
}
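// The long run of GetString lookups above could be collapsed into a
// table-driven loop; a sketch, assuming only the early-return-on-error
// behavior must be preserved (flag/field pairs abbreviated):
//
//	for flag, dst := range map[string]*string{
//		constants.FlagAzureTenantID:   &n.azureTenantID,
//		constants.FlagAzureSubnetName: &n.azureSubnetName,
//		// ... remaining string flags ...
//	} {
//		if *dst, err = cmd.Flags().GetString(flag); err != nil {
//			return
//		}
//	}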
func (n *Node) install | random_line_split |
|
perf_tool.go | "},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-Bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
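// Close our copy of the write end when this helper returns; debugd receives
// its own duplicate of the fd over D-Bus, so reads on rPipe only hit EOF once
// debugd has finished writing.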
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil |
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
}
func testSingleCall(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Since(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
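// checkCPUIdleDisabled is defined elsewhere in this package. A minimal sketch
// of what such a check might look like, assuming the Linux sysfs cpuidle
// interface (the path and semantics here are assumptions, not the actual
// implementation):
//
//	func checkCPUIdleDisabled(want bool) error {
//		paths, err := filepath.Glob("/sys/devices/system/cpu/cpu*/cpuidle/state*/disable")
//		if err != nil {
//			return err
//		}
//		for _, p := range paths {
//			b, err := os.ReadFile(p)
//			if err != nil {
//				return err
//			}
//			if got := strings.TrimSpace(string(b)) == "1"; got != want {
//				return fmt.Errorf("%s: disabled=%v, want %v", p, got, want)
//			}
//		}
//		return nil
//	}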
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui | {
rPipe.Close()
return nil, 0, err
} | conditional_block |
perf_tool.go | "},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-Bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil {
rPipe.Close()
return nil, 0, err
}
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) |
func testSingleCall(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
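// Only one perf session may run at a time, so exactly one of the concurrent
// calls is expected to succeed and the other repetition-1 calls to fail.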
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Since(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui | {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
} | identifier_body |
perf_tool.go | "},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-Bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
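// Close our copy of the write end when this helper returns; debugd receives
// its own duplicate of the fd over D-Bus, so reads on rPipe only hit EOF once
// debugd has finished writing.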
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil {
rPipe.Close()
return nil, 0, err
}
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
}
func | (ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Since(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui")
| testSingleCall | identifier_name |
perf_tool.go | arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}, {
Name: "etm_stress",
Val: testCase{
quipperArgs: []string{"--run_inject", "--inject_args", "inject;--itrace=i512il;--strip",
"--", "record", "-e", "cs_etm/autofdo/", "-a", "-N"},
disableCPUIdle: true,
repetition: 100,
},
Timeout: 30 * time.Minute,
ExtraAttr: []string{"group:stress"},
ExtraSoftwareDeps: []string{"arm"},
ExtraHardwareDeps: hwdep.D(hwdep.Platform("trogdor", "herobrine")),
}},
})
}
const defaultDuration = 4
// PerfTool tests D-Bus methods related to debugd's PerfTool.
func PerfTool(ctx context.Context, s *testing.State) {
dbgd, err := debugd.New(ctx)
if err != nil {
s.Fatal("Failed to connect to debugd D-Bus service: ", err)
}
rep := s.Param().(testCase).repetition
if rep > 1 {
// Stress tests run for the single call only.
for i := 0; i < rep; i++ {
testSingleCall(ctx, s, dbgd)
}
} else {
testSingleCall(ctx, s, dbgd)
testConsecutiveCalls(ctx, s, dbgd)
testConcurrentCalls(ctx, s, dbgd)
testStopEarly(ctx, s, dbgd)
testSurviveUICrash(ctx, s, dbgd)
testRestoreCPUIdle(ctx, s, dbgd)
}
}
func getPerfOutput(ctx context.Context, s *testing.State, d *debugd.Debugd,
tc testCase, durationSec int) (*os.File, uint64, error) {
qprArgs := append([]string{"--duration", strconv.Itoa(durationSec)}, tc.quipperArgs...)
rPipe, wPipe, err := os.Pipe()
if err != nil {
s.Fatal("Failed to create status pipe: ", err)
}
defer wPipe.Close()
sessionID, err := d.GetPerfOutputV2(ctx, qprArgs, tc.disableCPUIdle, wPipe)
if err != nil {
rPipe.Close()
return nil, 0, err
}
if sessionID == 0 {
s.Fatal("Invalid session ID from GetPerfOutputFd")
}
return rPipe, sessionID, nil
}
func checkPerfData(s *testing.State, result []byte) {
const minResultLength = 20
s.Logf("GetPerfOutputV2() returned %d bytes of perf data", len(result))
if len(result) < minResultLength {
s.Fatal("Perf output is too small")
}
if bytes.HasPrefix(result, []byte("<process exited with status: ")) {
s.Fatalf("Quipper failed: %s", string(result))
}
}
func testSingleCall(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSingleCall", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes()) | func testConsecutiveCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConsecutiveCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 1
for i := 0; i < 3; i++ {
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}
})
}
func testConcurrentCalls(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testConcurrentCalls", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
repetition := 3
errc := make(chan error, repetition)
var wg sync.WaitGroup
for i := 0; i < repetition; i++ {
wg.Add(1)
go func() {
defer wg.Done()
output, sessionID, err := getPerfOutput(ctx, s, d, tc, defaultDuration)
if err != nil {
errc <- err
return
}
defer output.Close()
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
checkPerfData(s, buf.Bytes())
}()
}
wg.Wait()
close(errc)
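// Only one perf session may run at a time, so exactly one of the concurrent
// calls is expected to succeed and the other repetition-1 calls to fail.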
ec := 0
for err := range errc {
s.Log("\"Existing perf tool running\" error expected, got: ", err)
ec++
}
if ec != repetition-1 {
s.Errorf("Calling GetPerfOutputV2 %d times concurrently, got %d errors, want %d",
repetition, ec, repetition-1)
}
})
}
func testStopEarly(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testStopEarly", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
durationSec := 15
stop := 4
start := time.Now()
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
})
}
output, sessionID, err := getPerfOutput(ctx, s, d, tc, durationSec)
if err != nil {
s.Fatal("Failed to call GetPerfOutputV2: ", err)
}
defer output.Close()
time.AfterFunc(time.Duration(stop)*time.Second, func() {
if err := d.StopPerf(ctx, sessionID); err != nil {
s.Fatal("Failed to call StopPerf: ", err)
}
})
s.Log("Session ID: ", sessionID)
var buf bytes.Buffer
if _, err := io.Copy(&buf, output); err != nil {
s.Fatal("Failed to read perf output: ", err)
}
rt := time.Since(start)
if rt >= time.Duration(durationSec)*time.Second {
s.Errorf("Failed to stop perf after %d seconds", stop)
}
s.Log("Real perf elapsed time: ", rt)
if tc.disableCPUIdle {
err := testing.Poll(ctx, func(_ context.Context) error {
return checkCPUIdleDisabled(false)
}, &testing.PollOptions{
Timeout: 3 * time.Second,
Interval: time.Second,
})
if err != nil {
s.Error("CPU Idle state not restored after perf collection: ", err)
}
}
checkPerfData(s, buf.Bytes())
})
}
func testSurviveUICrash(ctx context.Context, s *testing.State, d *debugd.Debugd) {
s.Run(ctx, "testSurviveUICrash", func(ctx context.Context, s *testing.State) {
tc := s.Param().(testCase)
if tc.disableCPUIdle {
time.AfterFunc(time.Second, func() {
if err := checkCPUIdleDisabled(true); err != nil {
s.Error("CPU Idle state not disabled during ETM collection: ", err)
}
cmd := exec.Command("stop", "ui")
| })
}
| random_line_split |
dockerapi.go | []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerLogStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
}
containerLogStream := ContainerReaderStream{readerStream, hostPort}
containerLogStreams = append(containerLogStreams, containerLogStream)
}
}
return containerLogStreams
}
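// Note: for containers created without a TTY, the stream returned by
// ContainerLogs multiplexes stdout and stderr with 8-byte frame headers; a
// sketch of demultiplexing one of the returned readers with the
// github.com/docker/docker/pkg/stdcopy package:
//
//	_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, readerStream)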
// getContainers lists all the containers running on the host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Printf("Unable to list containers: %v", err)
return nil, err
}
return containers, nil
}
// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails and we have to clean up the instances launched so far.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if _, ok := owned[container.ID]; ok {
err = dockerClient.ContainerStop(context.Background(), container.ID, nil)
}
}
}
}
// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if owned[container.ID] > 0 && container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
return nil
}
// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended number of containers is live; otherwise it panics.
// This is called by tests. AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert all containers are running
for _, container := range containers {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone checks that no containers exist and that the system cleaned up; otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert that no containers remain
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
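// Each stats stream yields a sequence of JSON-encoded samples; a sketch of
// decoding one of the returned readers (types.StatsJSON is the Docker API's
// decoded form):
//
//	dec := json.NewDecoder(out.Body)
//	for {
//		var sample types.StatsJSON
//		if err := dec.Decode(&sample); err != nil {
//			break
//		}
//		// consume sample.CPUStats, sample.MemoryStats, ...
//	}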
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
err := dockerClient.ContainerStop(context.Background(), contID, nil)
if err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
defer terminatorGroup.Done()
}()
}
}
}
}
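// Typical shutdown sequence (illustrative): request stops for every owned
// container, then block until all stop goroutines have completed.
//
//	var terminators sync.WaitGroup
//	owned.StopAllLiveContainers(&terminators, dockerClient)
//	terminators.Wait()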
// createContainer creates a new container from the dockerImageName image,
// listening on httpServerContainerPort inside the container
// and mapped to httpServerHostPort on the host.
// Returns the new container's struct abstraction and an error.
// Upon error it panics.
// Credit: https://medium.com/tarkalabs/controlling-the-docker-engine-in-go-826012f9671c
func createContainer(dockerClient *client.Client, dockerImageName string, httpServerContainerPort int, httpServerHostPort int) (container.ContainerCreateCreatedBody, error) {
hostBinding := nat.PortBinding{
HostIP: "0.0.0.0",
HostPort: fmt.Sprintf("%d", httpServerHostPort),
}
containerPort, err := nat.NewPort("tcp", fmt.Sprintf("%d", httpServerContainerPort))
if err != nil {
log.Panicf("Unable to create a tcp httpServerContainerPort %d\n", httpServerContainerPort)
}
portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
containerBody, err := dockerClient.ContainerCreate(context.Background(),
&container.Config{Image: dockerImageName},
&container.HostConfig{
PortBindings: portBinding,
AutoRemove: true,
},
nil,
fmt.Sprintf("HttpServerAt_%d", httpServerHostPort))
if err != nil {
log.Panicf("ContainerCreate failed for the image: %s, host port: %d with error: %s\n", dockerImageName, httpServerContainerPort, err)
}
return containerBody, err
}
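// A sketch of how createContainer and setContainerLive (below) compose to
// launch a single container (image name and ports are illustrative):
//
//	cli := GetDockerClient()
//	body, err := createContainer(cli, "mariohellowebserver", 8080, 8081)
//	if err == nil {
//		_, err = setContainerLive(cli, body.ID)
//	}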
// setContainerLive starts a created container in active live state.
func setContainerLive(dockerClient *client.Client, containerID string) (string, error) | {
err := dockerClient.ContainerStart(context.Background(), containerID, types.ContainerStartOptions{})
return containerID, err
} | identifier_body |
|
dockerapi.go | () {
readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
if err == nil {
readBackOwnedContainers := readObj.(map[string]int)
defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
dockerClient := GetDockerClient()
for containerID := range readBackOwnedContainers {
log.Printf("Deleting container: %v from previous launch.\n", containerID)
err = dockerClient.ContainerStop(context.Background(), containerID, nil)
}
}
}
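// Note: the gob file is deleted only after a successful read; the counterpart
// that persists owned container ports before shutdown is assumed to live in
// the mapsi2disk package.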
// GetDockerClient returns a Docker remote API client handle, foundational to all Docker remote API interactions.
// Upon error it panics.
// This process creates a Docker client at launch and holds on to it for all API interactions.
// This should be contrasted with the stateless approach of requesting a new client for each API interaction.
func GetDockerClient() *client.Client {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Panicf("Docker client.NewClientWithOpts error: %s\n", err)
}
dockerClient.NegotiateAPIVersion(ctx)
return dockerClient
}
// BuildDockerImage builds a Docker image for a given dockerFilePath located in the same folder
// as the running process.
// Upon error it exits the process.
func BuildDockerImage(dockerClient *client.Client, dockerFilePath string) {
tarDockerfileReader, err := archive.TarWithOptions(dockerFilePath, &archive.TarOptions{})
if err != nil {
log.Fatal(err, " :unable to create tar with Dockerfile")
}
log.Printf("Building Docker Image in %q\n", dockerFilePath)
options := types.ImageBuildOptions{
SuppressOutput: false,
Remove: true,
ForceRemove: true,
PullParent: true,
Tags: []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
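// The entire dockerFilePath directory is sent to the daemon as the build
// context; the "Dockerfile" named in ImageBuildOptions is resolved relative
// to that context.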
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerLogStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
}
containerLogStream := ContainerReaderStream{readerStream, hostPort}
containerLogStreams = append(containerLogStreams, containerLogStream)
}
}
return containerLogStreams
}
// getContainers lists all the containers running on the host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Printf("Unable to list containers: %v", err)
return nil, err
}
return containers, nil
}
// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails and we have to clean up the instances launched so far.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if _, ok := owned[container.ID]; ok {
err = dockerClient.ContainerStop(context.Background(), container.ID, nil)
}
}
}
}
// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if owned[container.ID] > 0 && container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
return nil
}
// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended number of containers is live; otherwise it panics.
// This is called by tests. AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert all containers are running
for _, container := range containers {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone checks that no containers exist and that the system cleaned up; otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert that no containers remain
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
err := dockerClient.ContainerStop(context.Background(), contID, nil)
if err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
defer terminatorGroup.Done()
}()
}
| RemoveLiveContainersFromPreviousRun | identifier_name |
|
dockerapi.go |
if err == nil {
defer mapsi2disk.DeleteFile(mapsi2disk.GobFilename)
dockerClient := GetDockerClient()
for containerID := range readBackOwnedContainers {
log.Printf("Deleting container: %v from previous launch.\n", containerID)
err = dockerClient.ContainerStop(context.Background(), containerID, nil)
}
}
}
// GetDockerClient returns a Docker remote API client handle, foundational to all Docker remote API interactions.
// Upon error it panics.
// This process creates a Docker client at launch and holds on to it for all API interactions.
// This should be contrasted with the stateless approach of requesting a new client for each API interaction.
func GetDockerClient() *client.Client {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Panicf("Docker client.NewClientWithOpts error: %s\n", err)
}
dockerClient.NegotiateAPIVersion(ctx)
return dockerClient
}
// BuildDockerImage builds a Docker image for a given dockerFilePath located in the same folder
// as the running process.
// Upon error it exits the process.
func BuildDockerImage(dockerClient *client.Client, dockerFilePath string) {
tarDockerfileReader, err := archive.TarWithOptions(dockerFilePath, &archive.TarOptions{})
if err != nil {
log.Fatal(err, " :unable to create tar with Dockerfile")
}
log.Printf("Building Docker Image in %q\n", dockerFilePath)
options := types.ImageBuildOptions{
SuppressOutput: false,
Remove: true,
ForceRemove: true,
PullParent: true,
Tags: []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil)
}
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerLogStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
}
containerLogStream := ContainerReaderStream{readerStream, hostPort}
containerLogStreams = append(containerLogStreams, containerLogStream)
}
}
return containerLogStreams
}
// getContainers lists all the containers running on the host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Printf("Unable to list containers: %v", err)
return nil, err
}
return containers, nil
}
// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails and the already-launched instances have to be cleaned up.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if _, ok := owned[container.ID]; ok {
if err = dockerClient.ContainerStop(context.Background(), container.ID, nil); err != nil {
log.Printf("Stopping container %s failed: %v\n", container.ID, err)
}
}
}
}
}
// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if owned[container.ID] == 0 {
continue // not owned by this process; only our own containers are asserted live
}
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
return nil
}
// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended containers number is live otherwise it panics.
// This is called by tests; AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert that every container on the host is running
for _, container := range containers {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone check that no containers exist and that the system cleaned up otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert owned containers are not running
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
defer terminatorGroup.Done()
// use a goroutine-local err: sharing the outer err across goroutines is a data race
if err := dockerClient.ContainerStop(context.Background(), contID, nil); err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
}()
}
}
}
}
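// exampleStopOwned is a minimal usage sketch (not wired into the workflow): the
// WaitGroup lets the caller block until every asynchronous stop request above
// has finished.
func exampleStopOwned(owned OwnedContainers) {
var terminatorGroup sync.WaitGroup
owned.StopAllLiveContainers(&terminatorGroup, GetDockerClient())
terminatorGroup.Wait()
}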
// | readObj, err := mapsi2disk.ReadContainerPortsFromDisk(mapsi2disk.GobFilename)
readBackOwnedContainers := readObj.(map[string]int) | random_line_split |
|
dockerapi.go | client when launching and holds on to it for all API interactions.
// This should be contrasted with the stateless approach of requesting a new client for any API interaction.
func GetDockerClient() *client.Client {
ctx := context.Background()
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
log.Panicf("Docker client.NewClientWithOpts error: %s\n", err)
}
dockerClient.NegotiateAPIVersion(ctx)
return dockerClient
}
// BuildDockerImage builds a Docker image from the Dockerfile found in the given
// dockerFilePath, relative to the running process's working directory.
// Upon error it exits the process.
func BuildDockerImage(dockerClient *client.Client, dockerFilePath string) {
tarDockerfileReader, err := archive.TarWithOptions(dockerFilePath, &archive.TarOptions{})
if err != nil {
log.Fatal(err, " :unable to create tar with Dockerfile")
}
log.Printf("Building Docker Image in %q\n", dockerFilePath)
options := types.ImageBuildOptions{
SuppressOutput: false,
Remove: true,
ForceRemove: true,
PullParent: true,
Tags: []string{"mariohellowebserver"},
Dockerfile: "Dockerfile",
}
buildResponse, err := dockerClient.ImageBuild(context.Background(), tarDockerfileReader, options)
if err != nil {
log.Fatal(err, " :unable to read image build response")
}
defer buildResponse.Body.Close()
termFd, isTerm := term.GetFdInfo(os.Stderr)
if err := jsonmessage.DisplayJSONMessagesStream(buildResponse.Body, os.Stderr, termFd, isTerm, nil); err != nil {
log.Printf("unable to display image build stream: %v", err)
}
}
// GetContainersLogReaders gets our running containers' log readers.
// Upon failure, it panics.
func (owned OwnedContainers) GetContainersLogReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerLogStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
readerStream, err := dockerClient.ContainerLogs(context.Background(), container.ID, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
log.Panicf("Unable to solicit a log reader from the container %s, error: %s\n", container.ID, err)
}
containerLogStream := ContainerReaderStream{readerStream, hostPort}
containerLogStreams = append(containerLogStreams, containerLogStream)
}
}
return containerLogStreams
}
// getContainers lists all the containers running on the host machine.
func getContainers(dockerClient *client.Client) ([]types.Container, error) {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Printf("Unable to list containers: %v", err)
return nil, err
}
return containers, nil
}
// CleanLeftOverContainers stops any *owned* live containers.
// Useful when launching containers fails and the already-launched instances have to be cleaned up.
func (owned OwnedContainers) CleanLeftOverContainers(dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if _, ok := owned[container.ID]; ok {
if err = dockerClient.ContainerStop(context.Background(), container.ID, nil); err != nil {
log.Printf("Stopping container %s failed: %v\n", container.ID, err)
}
}
}
}
}
// AssertOwnedContainersAreLive lists all the containers running on the host
// and asserts
// 1. Existence of enough live containers
// 2. This process' owned containers are live.
// It panics otherwise.
func (owned OwnedContainers) AssertOwnedContainersAreLive(requestedLiveContainers int, cli *client.Client) error {
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...should be %d containers but found %d.\n", requestedLiveContainers, containersCount)
}
// Assert owned containers are running
for _, container := range containers {
if owned[container.ID] == 0 {
continue // not owned by this process; only our own containers are asserted live
}
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
}
return nil
}
// AssertRequestedContainersAreLive lists all the containers running on the host
// and asserts that the intended containers number is live otherwise it panics.
// This is called by tests; AssertOwnedContainersAreLive is called by default within the workflow.
func AssertRequestedContainersAreLive(requestedLiveContainers int) {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if requestedLiveContainers > containersCount {
log.Panicf("Not enough containers...Wanted %d containers but got %d.\n", requestedLiveContainers, containersCount)
}
// Assert that every container on the host is running
for _, container := range containers |
fmt.Printf("\n**** Passed RequestedContainers == Live assertion. ****\n\n")
}
// AssertRequestedContainersAreGone check that no containers exist and that the system cleaned up otherwise it panics.
// This is called by tests.
func AssertRequestedContainersAreGone() {
cli := GetDockerClient()
containers, err := getContainers(cli)
if err != nil {
log.Panicf("Cannot access live containers...\n")
}
containersCount := len(containers)
if containersCount > 0 {
log.Printf("Containers are still alive... Wanted 0 containers but got %d.\n", containersCount)
}
// Assert owned containers are not running
for _, container := range containers {
log.Panicf("Found container %s with state %s, status %s.\n", container.ID, container.State, container.Status)
}
fmt.Printf("\n**** Passed RequestedContainers == 0 assertion. ****\n\n")
}
// GetContainersStatsReaders gets our running containers' resource readers.
// Upon error it panics.
func (owned OwnedContainers) GetContainersStatsReaders(dockerClient *client.Client) []ContainerReaderStream {
containers, err := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
log.Panicf("Unable to list containers. Error: %v", err)
}
containerStatsStreams := []ContainerReaderStream{}
for _, container := range containers {
hostPort := owned[container.ID]
if hostPort > 0 && container.State == containerRunningStateString {
out, err := dockerClient.ContainerStats(context.Background(), container.ID, true)
if err != nil {
log.Panicf("Unable to solicit a monitoring reader from the container %s, error: %s\n", container.ID, err)
}
containerStatsStream := ContainerReaderStream{out.Body, hostPort}
containerStatsStreams = append(containerStatsStreams, containerStatsStream)
}
}
return containerStatsStreams
}
// StopAllLiveContainers stops as many live containers as possible
func (owned OwnedContainers) StopAllLiveContainers(terminatorGroup *sync.WaitGroup, dockerClient *client.Client) {
containers, err := getContainers(dockerClient)
if err == nil {
for _, container := range containers {
if owned[container.ID] > 0 {
contID := container.ID
terminatorGroup.Add(1)
go func() {
defer terminatorGroup.Done()
// use a goroutine-local err: sharing the outer err across goroutines is a data race
if err := dockerClient.ContainerStop(context.Background(), contID, nil); err != nil {
log.Printf("Stopping container failed: %v\n", err)
} else {
log.Printf("Stopped container with ID: %s\n", contID)
}
}()
}
}
}
}
// createContainer creates a new container for the dockerImageName
// at the container httpServerContainerPort value.
// and at the host httpServerHostPort value.
// Returns the new container's struct abstraction, error.
// Upon error it panics.
// Credit: https://medium.com/tarkalabs/controlling-the-docker-engine-in-go-826012f9671c
func createContainer(dockerClient *client.Client, dockerImageName string, httpServerContainerPort int, httpServerHostPort int) (container.ContainerCreateCreatedBody, error) {
hostBinding := nat.PortBinding{
HostIP: "0.0.0.0",
HostPort: fmt | {
if container.State == containerRunningStateString {
log.Printf("Container %s in %s state.\n", container.ID, container.State)
} else {
log.Panicf("Found container %s that is not running with state %s, status %s.\n", container.ID, container.State, container.Status)
}
} | conditional_block |
set6.go |
}
func recoverKey(k, H, r, s, q *big.Int) *big.Int {
x := new(big.Int)
r1 := new(big.Int).ModInverse(r, q)
x = x.Mod(x.Mul(x.Sub(x.Mul(s, k), H), r1), q)
return x
}
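// recoverKey rearranges the DSA signing equation s = k^-1 * (H(m) + x*r) mod q
// into x = (s*k - H(m)) * r^-1 mod q, which is why leaking a single nonce k for
// one signature reveals the whole private key.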
func problemThree() error {
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
m := []byte("I'm a string")
fmt.Printf("DSA string: %q\n", m)
signature, err := ciphers.DSASign(m, x, params)
if err != nil {
return err
}
fmt.Printf("DSA signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(m, signature, y, params)
if err != nil {
return err
}
fmt.Printf("Verified: %v\n\n", verified)
weakR, _ := new(big.Int).SetString("548099063082341131477253921760299949438196259240", 10)
weakS, _ := new(big.Int).SetString("857042759984254168557880549501802188789837994940", 10)
message := []byte("For those that envy a MC it can be hazardous to your health\nSo be friendly, a matter of life and death, just like a etch-a-sketch\n")
hash, err := utils.HexToBigint(hashes.SHA1(message))
if err != nil {
return err
}
k := new(big.Int)
for i := 1; i <= 65536; i++ {
k = k.SetInt64(int64(i))
r := new(big.Int)
r = r.Mod(r.Exp(params.G, k, params.P), params.Q)
if r.Cmp(weakR) == 0 {
break
}
}
privateKey := recoverKey(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := []string{}
f, err := os.Open(dataDir + input)
if err != nil {
return err
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
str := scanner.Text()
subs := strings.SplitN(str, ": ", 2)
if subs[0] == "m" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 16)
ms = append(ms, n)
} else if subs[0] == "r" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
rs = append(rs, n)
} else if subs[0] == "s" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
ss = append(ss, n)
} else {
msgs = append(msgs, subs[1])
}
}
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
msgCount := len(ms)
k := new(big.Int)
found := false
for i := 0; i < msgCount; i++ {
for j := i; j < msgCount; j++ {
if i == j {
continue
}
if rs[i].Cmp(rs[j]) == 0 {
num := new(big.Int).Sub(ms[i], ms[j])
den := new(big.Int)
den = den.ModInverse(den.Sub(ss[i], ss[j]), params.Q)
k = k.Mod(k.Mul(num, den), params.Q)
hash, err := utils.HexToBigint(hashes.SHA1([]byte(msgs[i])))
if err != nil {
return err
}
privateKey := recoverKey(k, hash, rs[i], ss[i], params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "ca8f6f7c66fa362d40760d135b763eb8527d3d52" {
fmt.Printf("Found key: %v\n", privateKey)
found = true
break
}
}
}
if found {
break
}
}
return nil
}
func problemFive() error {
hello := []byte("Hello, world")
goodbye := []byte("Goodbye, world")
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
params.G = utils.GetBigInt(0)
signature, err := ciphers.DSASign(hello, x, params)
if err != nil {
return err
}
fmt.Println("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
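// rsaMul relies on RSA's multiplicative homomorphism: for c = m^e mod n,
// c * t^e = (m*t)^e mod n, so the ciphertext returned here decrypts to m*t.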
func problemSix() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage2(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
bits := server.N.BitLen()
lowerBound := utils.GetBigInt(0)
upperBound := new(big.Int).Set(server.N)
multiplier := utils.GetBigInt(2)
two := utils.GetBigInt(2)
printInline(upperBound.Bytes())
for i := 0; i < bits; i++ {
even, err := server.CheckIsEven(rsaMul(c, multiplier, server))
if err != nil |
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
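// Each parity query above halves the interval [lowerBound, upperBound] that
// contains the plaintext: the i-th query asks whether 2^i * m mod n is even,
// i.e. whether the product wrapped past the odd modulus n, so after BitLen(n)
// queries the interval collapses onto the plaintext itself.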
type Interval struct {
Lower *big.Int
Upper *big.Int
B *big.Int
TwoB *big.Int
ThreeB | {
return err
} | conditional_block |
set6.go | Key(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := []string{}
f, err := os.Open(dataDir + input)
if err != nil {
return err
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
str := scanner.Text()
subs := strings.SplitN(str, ": ", 2)
if subs[0] == "m" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 16)
ms = append(ms, n)
} else if subs[0] == "r" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
rs = append(rs, n)
} else if subs[0] == "s" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
ss = append(ss, n)
} else {
msgs = append(msgs, subs[1])
}
}
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
msgCount := len(ms)
k := new(big.Int)
found := false
for i := 0; i < msgCount; i++ {
for j := i; j < msgCount; j++ {
if i == j {
continue
}
if rs[i].Cmp(rs[j]) == 0 {
num := new(big.Int).Sub(ms[i], ms[j])
den := new(big.Int)
den = den.ModInverse(den.Sub(ss[i], ss[j]), params.Q)
k = k.Mod(k.Mul(num, den), params.Q)
hash, err := utils.HexToBigint(hashes.SHA1([]byte(msgs[i])))
if err != nil {
return err
}
privateKey := recoverKey(k, hash, rs[i], ss[i], params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "ca8f6f7c66fa362d40760d135b763eb8527d3d52" {
fmt.Printf("Found key: %v\n", privateKey)
found = true
break
}
}
}
if found {
break
}
}
return nil
}
func problemFive() error {
hello := []byte("Hello, world")
goodbye := []byte("Goodbye, world")
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
params.G = utils.GetBigInt(0)
signature, err := ciphers.DSASign(hello, x, params)
if err != nil {
return err
}
fmt.Println("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
func problemSix() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage2(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
bits := server.N.BitLen()
lowerBound := utils.GetBigInt(0)
upperBound := new(big.Int).Set(server.N)
multiplier := utils.GetBigInt(2)
two := utils.GetBigInt(2)
printInline(upperBound.Bytes())
for i := 0; i < bits; i++ {
even, err := server.CheckIsEven(rsaMul(c, multiplier, server))
if err != nil {
return err
}
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
type Interval struct {
Lower *big.Int
Upper *big.Int
B *big.Int
TwoB *big.Int
ThreeB *big.Int
ThreeBSub1 *big.Int
}
func initialInterval(n *big.Int) *Interval {
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
B := new(big.Int).Exp(two, utils.GetBigInt(n.BitLen()-16), nil)
TwoB := new(big.Int).Mul(two, B)
ThreeB := new(big.Int).Mul(three, B)
ThreeBSub1 := new(big.Int).Sub(ThreeB, utils.GetBigInt(1))
return &Interval{TwoB, ThreeBSub1, B, TwoB, ThreeB, ThreeBSub1}
}
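// For a modulus of k bytes, B = 2^(8(k-2)) -- written above as 2^(BitLen-16),
// assuming BitLen is a multiple of 8 -- so PKCS#1 v1.5 conformant messages,
// which start with the bytes 0x00 0x02, lie exactly in [2B, 3B-1].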
func searchS(s0, c *big.Int, server *secrets.RSAServer) (*big.Int, error) {
s1 := new(big.Int).Set(s0)
one := utils.GetBigInt(1)
var err error
valid := false
for !valid {
c1 := rsaMul(c, s1, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, err
}
if valid {
return s1, nil
}
s1 = s1.Add(s1, one)
}
return s1, nil
}
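// searchS implements steps 2a/2b of Bleichenbacher's attack: scan upward from
// s0 (initially near n/(3B), later s+1) for the next multiplier s whose
// c * s^e mod n is PKCS#1 v1.5 conformant according to the padding oracle.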
func searchRS(s0, c *big.Int, interval *Interval, server *secrets.RSAServer) (*big.Int, *big.Int, error) | {
one := utils.GetBigInt(1)
r := new(big.Int)
r = ceilDiv(r.Mul(utils.GetBigInt(2), r.Sub(r.Mul(interval.Upper, s0), interval.TwoB)), server.N)
s := new(big.Int)
minS := new(big.Int)
maxS := new(big.Int)
var err error
valid := false
for r.Cmp(server.N) == -1 {
rn := new(big.Int).Mul(r, server.N)
minS = minS.Div(minS.Add(interval.TwoB, rn), interval.Upper)
maxS = maxS.Div(maxS.Add(interval.ThreeB, rn), interval.Lower)
for s.Set(minS); s.Cmp(maxS) == -1; s.Add(s, one) {
c1 := rsaMul(c, s, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, nil, err | identifier_body |
|
set6.go | ("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
func problemSix() error {
server, err := secrets.NewRSAServer(1024)
if err != nil {
return err
}
ciphertext, err := secrets.GetClientMessage2(server)
if err != nil {
return err
}
c, err := utils.HexToBigint(ciphertext)
if err != nil {
return err
}
bits := server.N.BitLen()
lowerBound := utils.GetBigInt(0)
upperBound := new(big.Int).Set(server.N)
multiplier := utils.GetBigInt(2)
two := utils.GetBigInt(2)
printInline(upperBound.Bytes())
for i := 0; i < bits; i++ {
even, err := server.CheckIsEven(rsaMul(c, multiplier, server))
if err != nil {
return err
}
middle := new(big.Int).Add(lowerBound, upperBound)
middle = middle.Div(middle, two)
if even {
upperBound = middle
} else {
lowerBound = middle
}
time.Sleep(5 * time.Millisecond)
printInline(upperBound.Bytes())
multiplier = multiplier.Mul(multiplier, two)
}
fmt.Println()
return nil
}
type Interval struct {
Lower *big.Int
Upper *big.Int
B *big.Int
TwoB *big.Int
ThreeB *big.Int
ThreeBSub1 *big.Int
}
func initialInterval(n *big.Int) *Interval {
two := utils.GetBigInt(2)
three := utils.GetBigInt(3)
B := new(big.Int).Exp(two, utils.GetBigInt(n.BitLen()-16), nil)
TwoB := new(big.Int).Mul(two, B)
ThreeB := new(big.Int).Mul(three, B)
ThreeBSub1 := new(big.Int).Sub(ThreeB, utils.GetBigInt(1))
return &Interval{TwoB, ThreeBSub1, B, TwoB, ThreeB, ThreeBSub1}
}
func searchS(s0, c *big.Int, server *secrets.RSAServer) (*big.Int, error) {
s1 := new(big.Int).Set(s0)
one := utils.GetBigInt(1)
var err error
valid := false
for !valid {
c1 := rsaMul(c, s1, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, err
}
if valid {
return s1, nil
}
s1 = s1.Add(s1, one)
}
return s1, nil
}
func searchRS(s0, c *big.Int, interval *Interval, server *secrets.RSAServer) (*big.Int, *big.Int, error) {
one := utils.GetBigInt(1)
r := new(big.Int)
r = ceilDiv(r.Mul(utils.GetBigInt(2), r.Sub(r.Mul(interval.Upper, s0), interval.TwoB)), server.N)
s := new(big.Int)
minS := new(big.Int)
maxS := new(big.Int)
var err error
valid := false
for r.Cmp(server.N) == -1 {
rn := new(big.Int).Mul(r, server.N)
minS = minS.Div(minS.Add(interval.TwoB, rn), interval.Upper)
maxS = maxS.Div(maxS.Add(interval.ThreeB, rn), interval.Lower)
for s.Set(minS); s.Cmp(maxS) == -1; s.Add(s, one) {
c1 := rsaMul(c, s, server)
valid, err = server.PKCS15Valid(c1)
if err != nil {
return nil, nil, err
}
if valid {
return r, s, nil
}
}
r = r.Add(r, one)
}
return nil, nil, errors.New("could not find parameters")
}
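// searchRS is step 2c: once a single interval survives, r and s are advanced
// in tandem, roughly doubling r each round, which converges far faster than
// the linear scan in searchS.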
func nextInterval(interval *Interval, s, r, n *big.Int) *Interval {
rn := new(big.Int).Mul(r, n)
a1 := new(big.Int)
a1 = ceilDiv(a1.Add(interval.TwoB, rn), s)
b1 := new(big.Int)
b1 = b1.Div(b1.Add(interval.ThreeBSub1, rn), s)
var newInt Interval
newInt = *interval
if interval.Lower.Cmp(a1) == -1 {
newInt.Lower = a1
}
if interval.Upper.Cmp(b1) == 1 {
newInt.Upper = b1
}
return &newInt
}
func ceilDiv(x, y *big.Int) *big.Int {
mod := new(big.Int)
zero := utils.GetBigInt(0)
z, mod := new(big.Int).DivMod(x, y, mod)
if mod.Cmp(zero) != 0 {
z = z.Add(z, utils.GetBigInt(1))
}
return z
}
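// Example: ceilDiv(7, 2) = 4 where plain integer division gives 3; the attack
// needs ceilings on lower bounds so no candidate r or interval edge is missed.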
func allIntervals(currentSet []*Interval, s, n *big.Int) []*Interval {
one := utils.GetBigInt(1)
newSet := []*Interval{}
for i := 0; i < len(currentSet); i++ {
bounds := currentSet[i]
minBound := new(big.Int)
minBound = ceilDiv(minBound.Sub(minBound.Mul(bounds.Lower, s), bounds.ThreeBSub1), n)
maxBound := new(big.Int)
maxBound = maxBound.Div(maxBound.Sub(maxBound.Mul(bounds.Upper, s), bounds.TwoB), n)
r := minBound
for r.Cmp(maxBound) != 1 {
next := nextInterval(bounds, s, r, n)
newSet = append(newSet, next)
r = r.Add(r, one)
}
}
return newSet
}
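// allIntervals is step 3 of the attack: every surviving interval is
// intersected with [(2B + r*n)/s, (3B - 1 + r*n)/s] for each feasible r,
// so the candidate set shrinks (or splits) after every conformant s found.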
func printIntervals(set []*Interval) {
printInline(set[0].Lower.Bytes())
}
func bleichbacherAttack(c *big.Int, server *secrets.RSAServer) ([]byte, error) {
one := utils.GetBigInt(1)
bounds := initialInterval(server.N)
minS := new(big.Int)
minS = minS.Div(server.N, bounds.ThreeB)
s, err := searchS(minS, c, server)
if err != nil {
return nil, err
}
intervalSet := []*Interval{bounds}
for i := 0; i < 5000; i++ {
intervalSet = allIntervals(intervalSet, s, server.N)
for j := 0; j < len(intervalSet); j++ {
if intervalSet[j].Lower.Cmp(intervalSet[j].Upper) == 0 {
return intervalSet[j].Lower.Bytes(), nil
}
}
printIntervals(intervalSet)
if len(intervalSet) > 1 {
minS = minS.Add(s, one)
s, err = searchS(minS, c, server)
if err != nil {
return nil, err
} | } else {
_, s, err = searchRS(s, c, bounds, server)
if err != nil {
return nil, err | random_line_split |
|
set6.go | () error {
e, d, n, err := ciphers.RSAKeygen(1024)
if err != nil {
return err
}
plaintext := []byte("hi mom")
signature, err := ciphers.PKCS15Sign(plaintext, d, n)
if err != nil {
return err
}
fmt.Printf("Valid Signature: %s\n", signature)
verified := ciphers.PKCS15Verify(plaintext, signature, e, n)
fmt.Printf("Verified: %t\n", verified)
hash, err := hex.DecodeString(hashes.SHA1(plaintext))
if err != nil {
return err
}
padding := utils.MakeRepeatChar('\xff', 10)
padded := append([]byte("\x00\x01"), padding...)
padded = append(padded, '\x00')
padded = append(padded, hash...)
padded = append(padded, utils.MakeRepeatChar('\x00', 95)...)
x := new(big.Int).SetBytes(padded)
y := cubeRoot(x)
y = y.Add(y, utils.GetBigInt(1)) // overestimation > underestimation
forgery := hex.EncodeToString(y.Bytes())
fmt.Printf("Forged Signature: %s\n", forgery)
verified = ciphers.PKCS15Verify(plaintext, forgery, e, n)
fmt.Printf("Verified: %t\n", verified)
return nil
}
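// This is Bleichenbacher's e=3 signature forgery: it relies on PKCS15Verify
// checking only the 0x00 0x01 0xff... padding prefix and the hash, ignoring
// the trailing bytes. Rounding the cube root up makes y^3 match the desired
// prefix while the error spills harmlessly into the ignored tail.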
func recoverKey(k, H, r, s, q *big.Int) *big.Int {
x := new(big.Int)
r1 := new(big.Int).ModInverse(r, q)
x = x.Mod(x.Mul(x.Sub(x.Mul(s, k), H), r1), q)
return x
}
func problemThree() error {
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
m := []byte("I'm a string")
fmt.Printf("DSA string: %q\n", m)
signature, err := ciphers.DSASign(m, x, params)
if err != nil {
return err
}
fmt.Printf("DSA signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(m, signature, y, params)
if err != nil {
return err
}
fmt.Printf("Verified: %v\n\n", verified)
weakR, _ := new(big.Int).SetString("548099063082341131477253921760299949438196259240", 10)
weakS, _ := new(big.Int).SetString("857042759984254168557880549501802188789837994940", 10)
message := []byte("For those that envy a MC it can be hazardous to your health\nSo be friendly, a matter of life and death, just like a etch-a-sketch\n")
hash, err := utils.HexToBigint(hashes.SHA1(message))
if err != nil {
return err
}
k := new(big.Int)
for i := 1; i <= 65536; i++ {
k = k.SetInt64(int64(i))
r := new(big.Int)
r = r.Mod(r.Exp(params.G, k, params.P), params.Q)
if r.Cmp(weakR) == 0 {
break
}
}
privateKey := recoverKey(k, hash, weakR, weakS, params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "0954edd5e0afe5542a4adf012611a91912a3ec16" {
fmt.Printf("Found key: %v\n", privateKey)
}
return nil
}
func problemFour(input string) error {
rs := []*big.Int{}
ss := []*big.Int{}
ms := []*big.Int{}
msgs := []string{}
f, err := os.Open(dataDir + input)
if err != nil {
return err
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
str := scanner.Text()
subs := strings.SplitN(str, ": ", 2)
if subs[0] == "m" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 16)
ms = append(ms, n)
} else if subs[0] == "r" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
rs = append(rs, n)
} else if subs[0] == "s" {
n, _ := new(big.Int).SetString(strings.TrimSpace(subs[1]), 10)
ss = append(ss, n)
} else {
msgs = append(msgs, subs[1])
}
}
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
msgCount := len(ms)
k := new(big.Int)
found := false
for i := 0; i < msgCount; i++ {
for j := i; j < msgCount; j++ {
if i == j {
continue
}
if rs[i].Cmp(rs[j]) == 0 {
num := new(big.Int).Sub(ms[i], ms[j])
den := new(big.Int)
den = den.ModInverse(den.Sub(ss[i], ss[j]), params.Q)
k = k.Mod(k.Mul(num, den), params.Q)
hash, err := utils.HexToBigint(hashes.SHA1([]byte(msgs[i])))
if err != nil {
return err
}
privateKey := recoverKey(k, hash, rs[i], ss[i], params.Q)
keyHash := hashes.SHA1([]byte(privateKey.Text(16)))
if keyHash == "ca8f6f7c66fa362d40760d135b763eb8527d3d52" {
fmt.Printf("Found key: %v\n", privateKey)
found = true
break
}
}
}
if found {
break
}
}
return nil
}
func problemFive() error {
hello := []byte("Hello, world")
goodbye := []byte("Goodbye, world")
params, err := ciphers.NewDSAParams()
if err != nil {
return err
}
x, y, err := ciphers.DSAKeygen(params)
if err != nil {
return err
}
params.G = utils.GetBigInt(0)
signature, err := ciphers.DSASign(hello, x, params)
if err != nil {
return err
}
fmt.Println("g = 0")
fmt.Printf("Signature: r:%v s:%v \n", signature.R, signature.S)
verified, err := ciphers.DSAVerify(hello, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, signature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n\n", verified)
fmt.Println("g = p + 1")
params.G = params.G.Add(params.P, utils.GetBigInt(1))
z := utils.GetBigInt(10)
r := new(big.Int)
r = r.Mod(r.Exp(y, z, params.P), params.Q)
s := new(big.Int)
s = s.Mod(s.Mul(r, s.ModInverse(z, params.Q)), params.Q)
badSignature := &ciphers.DSASignature{R: r, S: s}
fmt.Printf("Signature: r:%v s:%v \n", badSignature.R, badSignature.S)
verified, err = ciphers.DSAVerify(hello, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Hello' verified: %v\n", verified)
verified, err = ciphers.DSAVerify(goodbye, badSignature, y, params)
if err != nil {
return err
}
fmt.Printf("'Goodbye' verified: %v\n", verified)
return nil
}
func printInline(b []byte) {
for i := 0; i < len(b); i++ {
if int(b[i]) < 32 {
b[i] = byte('?')
}
}
b = append(b, utils.MakeRepeatChar(' ', 30)...)
fmt.Printf("%s\r", b)
}
func rsaMul(c, multiplier *big.Int, server *secrets.RSAServer) string {
cPrime := new(big.Int)
cPrime = cPrime.Mod(cPrime.Mul(c, cPrime.Exp(multiplier, server.E, server.N)), server.N)
return hex.EncodeToString(cPrime.Bytes())
}
func problemSix() error {
server, | problemTwo | identifier_name |
|
OrganizationListDetail.ts | 、干部干事培训和考核、人才互荐交流、总结晚会等活动;负责与各学院的学生科创组织保持紧密联系,开展交流活动;负责与兄弟高校的学生科创组织进行交流合作。'
},
{
name: '科创竞赛部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-jingsai.png)`
},
introduction:
'科创竞赛部是学生科技联合会的竞技中心。以“科创点亮未来,竞赛成就梦想”为理念,主要负责开展和推广校内外科技竞赛活动,如“科普先锋秀”、“无线电猎狐大赛”等;组织“挑战杯”(大挑)大学生课外学术科技作品竞赛、“创青春”(小挑)全国大学生创业大赛的申报和立项工作。致力于为全校提供一个校内外公平竞技的平台,营造良好的校园竞技氛围。'
},
{
name: '项目管理部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-guanli.png)`
},
introduction:
'项目管理部是学生科联的科创中心,主要负责学生科联的创新、创业工作。负责协调开展“学生科技节”等系列大型科技活动,包括组织开展“学生课外学术科技作品竞赛”、“创新创业训练营”、“学生创新创业成果展”等系列活动。同时致力于“创新高端论坛”校内外创新创业沙龙活动、运用网络媒体发布校内外科创赛事资讯等,构建学校创新创业项目交流以及推广的平台,营造浓厚的科技创新创业氛围。'
},
{
name: '科技人文部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-renwen.png)`
},
introduction:
'科技人文部作为学生科技联合会的文化活动中心,秉承着“科技点缀生活,人文融入梦想”的理念,以“文峰青年大讲堂”、“重邮青年说”、“学长演播厅”为主打品牌活动。文峰青年大讲堂诚邀知名专家学者和文化名人,旨在浓厚校园科技文化氛围的同时,强化人文环境,打造属于重邮的专属讲堂。重邮青年说旨在寻找和培养一批敢于发声,说出自己对生活的感悟的重邮人,传播年轻正能量。学长演播厅邀请优秀学长学姐,为新生答疑解惑,力求将最新最热最有用的大学资讯和成功经验分享给重邮学子。'
},
{
name: '信息部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-xinxi.png)`
},
introduction:
'信息部是学生科联的技术中心。其主要负责科技创新实践活动的培训与开展。Web组主要负责Html5的开发及Web前端的基础培训,静态网页与动态网页的制作;UI组负责对网页整体界面美观、人机交互、操作逻辑的设计;运营组主要负责利用PowerPoint和Premiere等软件来进行产品运营以及宣传。致力于“培养精英团队”,打造科联信息化平台,丰富科联的创新创意活动。'
},
{
name: '媒体运营部',
posterStyle: {
backgroundImage: `url(${imgHostname}kelian/kelian-meiti.png)`
},
introduction:
'媒体运营部是学生科联的创意中心。其主要负责学生科联创意设计及校内外宣传工作。设计海报、条漫及展板,活动现场布置等;制作活动视频、微电影和动画以及活动现场摄影摄像及新闻稿的撰写。同时也负责学生科联线上的运营工作,管理科联公众号的推广,通过海报、视频、网络等形式在校内外宣传科联活动,打响科联品牌,展示科联成果。'
}
]
},
shelian: {
name: '学生社团联合会',
departmentList: [
{
name: '综合部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-zonghe.png)`
},
introduction:
'作为我校学生社团联合会的管家部门,是一个沟通组织内部、联系组织外部的桥梁性部门。综合部,对内负责社联物资管理、各部门考勤考核以及财务报账问题,解决社联体系里的琐事,组织内部四个部门团建,协助监督其他部门完成相应任务。对外,掌握我校社联对外高校的交流与联系,为给树立我校社联树立一个良好的形象做出不少努力。部门更注重的是带动学校各个社团的发展,时刻监督并管理着社团的充分运行。'
},
{
name: '社团活动部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-huodong.png)`
},
introduction:
'社团活动部作为学生社团联合会直属四部门之一是一个以社团活动为主的特殊部门。社团活动部通过对各个社团不同的特点,针对不同类别的社团策划、组织、开展有助于社团发展的活动,社团活动部更侧重于组织开展新颖且独具特色的社团活动,同时承办各部门共同举办校级大型活动,丰富校园文化。'
},
{
name: '社团服务部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-fuwu.png)`
},
introduction:
'社团服务部服务部作为社联必不可少的四大部门之一,有着社团小帮手的称号。主要的工作职责是管理社团,是社联连接社团、社团部的重要桥梁,组织着社团的成立、招新、换届以及社团的评奖评优等工作。社团服务部相比其他三个更注重的是带动学校各个社团的发展,时刻监督并管理着社团的充分运行。'
},
{
name: '宣传部',
posterStyle: {
backgroundImage: `url(${imgHostname}shelian/shelian-xuanchuan.png)`
},
introduction:
'宣传部作为学生社团联合会下的直属部门,主要负责相关活动前期的宣传推广工作,设计宣传推送、活动海报、内容视频等,以使后期活动能够顺利开展,并达到预期效果。同时负责审批社团的活动海报、视频等相关文化制品。并参与运营和搭建社联新媒体的学生社团宣传平台,更新宣传方法,加大宣传力度,拓宽宣传受众面。致力于使更多的同学了解并参与各个社团以及其组织的相关活动,丰富同学们的课余生活。'
},
]
},
yanhui: {
name: '研究生会',
departmentList: [
{
name: '科技实践部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-keji.jpg)`
},
introduction:
'我们将致力于创办与科技相关的赛事活动,如主办全校研究生英语风采大赛、协办各项科技竞赛、参管研究生辩论队。为我校研究生打开有创新、有思想、有趣味的新世界大门。'
},
{
name: '信息宣传部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-xinxuan.jpg)`
},
introduction:
'负责公众号运营,研会线上、线下宣传产品设计,宣传片、视频等多媒体作品制作以及其他宣传工作,对研会各个活动进行品牌包装和技术支持。让我们告别枯燥的海报制作,轻松掌握新媒体运营技巧。在信宣,技术不是关键,脑洞征服世界。'
},
{
name: '外联部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-wailian.jpg)`
},
introduction:
'主管企业俱乐部,负责全国各兄弟院校之间、本校研究生与企业等单位之间、校内各组织间的沟通与交流。各高校知名专家学者,重邮知名校友校企,社会知名人士都是你沟通的对象。'
},
{
name: '自律权益部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-zilv.jpg)`
},
introduction:
'这里是学生实现自我管理的重要平台,我们要配合学校管理日常纪律、维护公共秩序,还要协助学生宿舍安全卫生检查工作。我们的目标是:为全校研究生营造安全、文明、舒适的学习和生活环境。'
},
{
name: '人力资源部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-renli.jpg)`
},
introduction:
'掌握研会命脉,聆听各部心声。负责研会的人事管理、活动记录和物资进出与调度。长期的工作中以严谨高效的作风,根据研会章程,制定活动规范与考核制度。主办新老生交流会及素质拓展等活动,加强研会内部交流融合。人力资源部,团结研会力量,做实力HR !'
},
{
name: '文体部',
posterStyle: {
backgroundImage: `url(${imgHostname}yanhui/yanhui-wenti.jpg)`
},
| introduction:
'研究生校园文化生活的缔造者和领跑人。于文,主办迎新晚会等大型活动,丰富研究生的课余生活,协助各分研会举办各类文艺活动,营造活跃向上的氛围。于体,参与组建、管理研究生各类球队,积极参加各类校级比赛,如运动会、“青春杯”篮球、足球赛、公园排球赛、校园马拉松等,宣传体育育人理念,提高研究生的综合素质。'
},
{
| random_line_split |
|
dominogame.go | dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw // note: this copies the struct header only; slices inside still share backing arrays with gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces)) // Intn(n) already excludes n; len-1 skipped the last piece and panics when one piece remains
player.ownedPieces = append(player.ownedPieces, game.pieces[r])
firstTurn, highestDouble = firstMove(game.pieces[r], highestDouble, firstTurn, player.playerNumber)
game.pieces = remove(game.pieces, r)
}
game.players[k] = player
} | }
}
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
func remove(s []dominoPiece, i int) []dominoPiece {
s[i] = s[len(s)-1]
return s[:len(s)-1]
}
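// remove is a constant-time swap-delete: it overwrites index i with the last
// element and shrinks the slice by one, so piece order is not preserved --
// acceptable here because pieces are always drawn by index anyway.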
func generateTurnOrder(firstMove int, players []player) (turnOrder []int) {
turnOrder = append(turnOrder, firstMove)
for _, player := range players {
if player.playerNumber != firstMove {
turnOrder = append(turnOrder, player.playerNumber)
}
}
return
}
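// Example: with players 1..4 and firstMove = 3 the resulting order is
// [3 1 2 4] -- the double-holder goes first, everyone else keeps seating order.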
func (game *dominoGame) playGame() {
firstTurn := true
var pickedPiece int
var newOwnedPieces []dominoPiece
var newGrid dominoGrid
for {
//players place their pieces down in specific turns
for _, playerNum := range game.turnOrder {
printGrid(game.grid)
if firstTurn {
//have to place the highest doubles piece for the first turn
highestDouble := getHighestDouble(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
break
}
// move the drawn piece from the boneyard into the player's hand; without this the selection was discarded
game.players[playerNum-1].ownedPieces = append(game.players[playerNum-1].ownedPieces, game.pieces[pickedPiece])
game.pieces = remove(game.pieces, pickedPiece)
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) {
fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
// x indexes columns (grid[0]) and y indexes rows (grid), matching the bounds checks above
if x == len(newGrid.grid[0]) {
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) {
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
case 3:
if newGrid | if firstTurn != 0 {
return game, firstTurn
} | random_line_split |
dominogame.go | inoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw // note: this copies the struct header only; slices inside still share backing arrays with gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces)) // Intn(n) already excludes n; len-1 skipped the last piece and panics when one piece remains
player.ownedPieces = append(player.ownedPieces, game.pieces[r])
firstTurn, highestDouble = firstMove(game.pieces[r], highestDouble, firstTurn, player.playerNumber)
game.pieces = remove(game.pieces, r)
}
game.players[k] = player
}
if firstTurn != 0 {
return game, firstTurn
}
}
}
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
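// Note that player numbers start at 1, so firstTurn == 0 doubles as the
// "no double seen yet" sentinel that keeps assignPieces re-dealing.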
func remove(s []dominoPiece, i int) []dominoPiece {
s[i] = s[len(s)-1]
return s[:len(s)-1]
}
func generateTurnOrder(firstMove int, players []player) (turnOrder []int) {
turnOrder = append(turnOrder, firstMove)
for _, player := range players {
if player.playerNumber != firstMove {
turnOrder = append(turnOrder, player.playerNumber)
}
}
return
}
func (game *dominoGame) playGame() {
firstTurn := true
var pickedPiece int
var newOwnedPieces []dominoPiece
var newGrid dominoGrid
for {
//players place their pieces down in specific turns
for _, playerNum := range game.turnOrder {
printGrid(game.grid)
if firstTurn {
//have to place the highest doubles piece for the first turn
highestDouble := getHighestDouble(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
break
}
// move the drawn piece from the boneyard into the player's hand; without this the selection was discarded
game.players[playerNum-1].ownedPieces = append(game.players[playerNum-1].ownedPieces, game.pieces[pickedPiece])
game.pieces = remove(game.pieces, pickedPiece)
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) | fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
// x indexes columns (grid[0]) and y indexes rows (grid), matching the bounds checks above
if x == len(newGrid.grid[0]) {
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) {
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
case 3:
if newGrid | {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) { | identifier_body |
dominogame.go | (gameRaw dominoGame) (dominoGame, int) {
var firstTurn, highestDouble int
//pieces will be reshuffled if nobody starts with a double
for {
game := gameRaw // note: this copies the struct header only; slices inside still share backing arrays with gameRaw
//assign domino pieces to players
for k, player := range game.players {
for i := 1; i <= 7; i++ {
r := rand.Intn(len(game.pieces)) // Intn(n) already excludes n; len-1 skipped the last piece and panics when one piece remains
player.ownedPieces = append(player.ownedPieces, game.pieces[r])
firstTurn, highestDouble = firstMove(game.pieces[r], highestDouble, firstTurn, player.playerNumber)
game.pieces = remove(game.pieces, r)
}
game.players[k] = player
}
if firstTurn != 0 {
return game, firstTurn
}
}
}
//determining which player places the first piece
func firstMove(piece dominoPiece, highestDouble, firstTurn, playerNum int) (int, int) {
if (piece.top == piece.bot) && (piece.top > highestDouble) {
firstTurn = playerNum
highestDouble = piece.top
}
return firstTurn, highestDouble
}
func remove(s []dominoPiece, i int) []dominoPiece {
s[i] = s[len(s)-1]
return s[:len(s)-1]
}
func generateTurnOrder(firstMove int, players []player) (turnOrder []int) {
turnOrder = append(turnOrder, firstMove)
for _, player := range players {
if player.playerNumber != firstMove {
turnOrder = append(turnOrder, player.playerNumber)
}
}
return
}
func (game *dominoGame) playGame() {
firstTurn := true
var pickedPiece int
var newOwnedPieces []dominoPiece
var newGrid dominoGrid
for {
//players place their pieces down in specific turns
for _, playerNum := range game.turnOrder {
printGrid(game.grid)
if firstTurn {
//have to place the highest doubles piece for the first turn
highestDouble := getHighestDouble(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
break
}
// move the drawn piece from the boneyard into the player's hand; without this the selection was discarded
game.players[playerNum-1].ownedPieces = append(game.players[playerNum-1].ownedPieces, game.pieces[pickedPiece])
game.pieces = remove(game.pieces, pickedPiece)
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
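//Edge-case sketch (illustrative, not from the original): `max` starts as the
//zero-value piece {0 0} and the comparison uses `>`, so a hand whose only
//double is [0|0] is never selected as the opening double.
func getHighestDoubleDemo() {
    hand := []dominoPiece{{top: 0, bot: 0}, {top: 2, bot: 5}}
    players := []player{{playerNumber: 1, ownedPieces: hand}}
    fmt.Println(getHighestDouble(players)) //prints {0 0}, but only as the zero value
}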
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) {
fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
if x == len(newGrid.grid[0]) { //x runs along a row, so compare against the row length
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) { //y indexes rows, so compare against the number of rows
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
| assignPieces | identifier_name |
|
dominogame.go | Double(game.players)
fmt.Println("Player ", playerNum, " starts first with their highest double.")
game.players[playerNum-1].ownedPieces, game.grid = placePiece(highestDouble, game.players[playerNum-1].ownedPieces, game.grid, true)
if game.players[playerNum-1].ownedPieces == nil {
fmt.Println("Error placing piece. This line of code should never be reached.")
continue
}
firstTurn = false
} else {
//does the player have any viable pieces?
viablePiece := false
for _, piece := range game.players[playerNum-1].ownedPieces {
if checkPiece(piece, game.grid) {
viablePiece = true
break
}
}
//take from boneyard if no viable piece in player's hand
if !viablePiece && len(game.pieces) > 0 {
fmt.Println("No viable piece in player ", playerNum, "'s hand. Select piece from the boneyard.")
for {
for k2, piece := range game.pieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.pieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.pieces)-1)
continue
}
//move the drawn piece from the boneyard into the player's hand;
//without this the selection above had no effect
game.players[playerNum-1].ownedPieces = append(game.players[playerNum-1].ownedPieces, game.pieces[pickedPiece])
game.pieces = remove(game.pieces, pickedPiece)
break
}
}
if !viablePiece && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " cannot make a move this turn as they have no viable pieces and the boneyard is empty.")
continue
}
for {
fmt.Println("Player ", playerNum, " select a piece.")
//print out pieces in a list, select from 1-numPieces
for k2, piece := range game.players[playerNum-1].ownedPieces {
fmt.Println(k2, " - ", piece)
}
fmt.Scan(&pickedPiece)
if pickedPiece < 0 || pickedPiece > len(game.players[playerNum-1].ownedPieces)-1 {
fmt.Println("Invalid selection. Pick a number from 0 to ", len(game.players[playerNum-1].ownedPieces)-1)
continue
}
//proposed piece placement
newOwnedPieces, newGrid = placePiece(game.players[playerNum-1].ownedPieces[pickedPiece], game.players[playerNum-1].ownedPieces, game.grid, false)
if newOwnedPieces == nil {
fmt.Println("Selected piece not valid. Pick a different piece.")
continue
}
break
}
//place piece on the grid
game.grid = newGrid
game.players[playerNum-1].ownedPieces = newOwnedPieces
}
//check win conditions
if len(game.players[playerNum-1].ownedPieces) == 0 && len(game.pieces) == 0 {
fmt.Println("Player ", playerNum, " wins!")
return
}
}
}
}
func getHighestDouble(players []player) dominoPiece {
var max dominoPiece
for _, player := range players {
for _, piece := range player.ownedPieces {
if piece.top == piece.bot {
if piece.top > max.top {
max = piece
}
}
}
}
return max
}
func placePiece(piece dominoPiece, playerPieces []dominoPiece, grid dominoGrid, firstTurn bool) ([]dominoPiece, dominoGrid) {
var x, y, ori int
var end2 string
var newGrid dominoGrid
//check viability of piece selected
if !checkPiece(piece, grid) && !firstTurn {
return nil, newGrid
}
//select which end of piece to place first
end := selectPieceEnd(piece)
//select square for end to go
for {
newGrid = grid
printGrid(newGrid)
//get x axis of grid
for {
fmt.Println("Type x-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&x)
if x > len(newGrid.grid[0]) {
fmt.Println("x too large. Grid is currently ", len(newGrid.grid[0]), " squares long.")
continue
}
if x < 1 {
fmt.Println("x too small. Start from 1.")
continue
}
break
}
//get y axis of grid
for {
fmt.Println("Type y-axis for end to go. (starting from 1 at top left)")
fmt.Scan(&y)
if y > len(newGrid.grid) {
fmt.Println("y too large. Grid is currently ", len(newGrid.grid), " squares long.")
continue
}
if y < 1 {
fmt.Println("y too small. Start from 1.")
continue
}
break
}
//check if space already occupied
if isSpaceAlreadyOccupied(newGrid, x, y) {
continue
}
//check if space is next to equivalent end
if !isSpaceNextToEquivalentEnd(newGrid, y, x, end) && !firstTurn {
continue
}
//place end
newGrid.grid[y-1][x-1] = end
//if end coordinates are on the edge of the grid, expand grid
fmt.Println("=== y: ", y, "x: ", x)
if y == 1 {
expandGrid("top", newGrid)
}
if x == len(newGrid.grid[0]) { //x runs along a row, so compare against the row length
expandGrid("right", newGrid)
}
if y == len(newGrid.grid) { //y indexes rows, so compare against the number of rows
expandGrid("bot", newGrid)
}
if x == 1 {
expandGrid("left", newGrid)
}
//get the other end of the domino piece
endInt, _ := strconv.Atoi(end)
if piece.top == endInt {
end2 = strconv.Itoa(piece.bot)
} else {
end2 = strconv.Itoa(piece.top)
}
//get orientation, expand grid if end2 touches the edge of grid
for {
printGrid(newGrid)
fmt.Println("Select orientation. 1-up, 2-right, 3-down, 4-left.")
fmt.Scan(&ori)
switch ori {
case 1:
if newGrid.grid[y-2][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-2][x-1] = end2
if y == 2 {
expandGrid("top", newGrid)
}
break
case 2:
if newGrid.grid[y-1][x] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x] = end2
if x == len(newGrid.grid[0])-1 {
expandGrid("right", newGrid)
}
break
case 3:
if newGrid.grid[y][x-1] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y][x-1] = end2
if y == len(newGrid.grid)-1 {
expandGrid("bot", newGrid)
}
break
case 4:
if newGrid.grid[y-1][x-2] != "X" {
fmt.Println("Space already occupied. Select an empty space.")
continue
}
//place end2
newGrid.grid[y-1][x-2] = end2
if x == 2 {
expandGrid("left", newGrid)
}
break
default:
fmt.Println("Invalid orientation. Select one of the numbers for each side.")
}
break
}
break
}
//overwrite with the new grid
grid = newGrid
printGrid(grid)
//remove piece from owned player pieces
for k, playerPiece := range playerPieces {
if playerPiece == piece {
playerPieces = remove(playerPieces, k)
break
}
}
return playerPieces, grid
}
func checkPiece(piece dominoPiece, grid dominoGrid) bool {
viable := false
for y := 1; y <= len(grid.grid)-2; y++ {
for x := 1; x <= len(grid.grid[0])-2; x++ {
//check if it could be matched with any domino on the board
if grid.grid[y][x] == strconv.Itoa(piece.top) || grid.grid[y][x] == strconv.Itoa(piece.bot) | {
//check if there is room to place
if grid.grid[y+1][x] == "X" || grid.grid[y-1][x] == "X" || grid.grid[y][x+1] == "X" || grid.grid[y][x-1] == "X" {
viable = true
}
} | conditional_block |
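//Hypothetical usage sketch (illustrative): a piece is playable iff one of its
//ends matches an exposed number that still has an empty "X" neighbour.
func checkPieceDemo(g dominoGrid) {
    playable := checkPiece(dominoPiece{top: 3, bot: 6}, g)
    fmt.Println("can play [3|6]:", playable)
}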
|
lib.rs | extern crate indexmap;
mod errors;
extern crate xml;
mod builder;
use std::borrow::Cow;
use std::fmt;
use std::io::{Read, Write};
use std::iter::Filter;
use std::slice::{Iter, IterMut};
use std::str::FromStr;
use std::string::ToString; |
use indexmap::IndexMap;
use xml::common::XmlVersion as BaseXmlVersion;
/// Enumeration of XML versions
///
/// This exists solely because `xml-rs`'s `XmlVersion` doesn't implement Debug
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML Version 1.0
Version10,
/// XML Version 1.1
Version11,
}
impl From<BaseXmlVersion> for XmlVersion {
fn from(value: BaseXmlVersion) -> XmlVersion {
match value {
BaseXmlVersion::Version10 => XmlVersion::Version10,
BaseXmlVersion::Version11 => XmlVersion::Version11,
}
}
}
impl From<XmlVersion> for BaseXmlVersion {
fn from(value: XmlVersion) -> BaseXmlVersion {
match value {
XmlVersion::Version10 => BaseXmlVersion::Version10,
XmlVersion::Version11 => BaseXmlVersion::Version11,
}
}
}
/// An XML element
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Element {
/// Tag prefix, used for namespacing: `xsl` in `xsl:for-each`
pub prefix: Option<String>,
/// Tag name: `for-each` in `xsl:for-each`
pub name: String,
/// Tag attributes
pub attributes: IndexMap<String, String>,
/// A vector of child elements
pub children: Vec<Element>,
/// Contents of the element
pub text: Option<String>,
/// CDATA contents of the element
pub cdata: Option<String>,
}
impl Default for Element {
fn default() -> Self {
Element {
prefix: None,
name: "tag".to_owned(),
attributes: IndexMap::new(),
children: Vec::new(),
text: None,
cdata: None,
}
}
}
impl Element {
/// Create a new `Element` with the tag name `name`
pub fn new<S>(name: S) -> Element
where
S: ToString,
{
Element {
name: name.to_string(),
..Element::default()
}
}
/// Parse the contents of an element
fn parse<R: Read>(
&mut self,
mut reader: &mut xml::reader::EventReader<R>,
) -> Result<(), Error> {
use xml::reader::XmlEvent;
loop {
let ev = reader.next()?;
match ev {
XmlEvent::StartElement {
name, attributes, ..
} => {
let mut attr_map = IndexMap::new();
for attr in attributes {
let attr_name = match attr.name.prefix {
Some(prefix) => format!("{}:{}", prefix, attr.name.local_name),
None => attr.name.local_name,
};
attr_map.insert(attr_name, attr.value);
}
let mut child = Element {
prefix: name.prefix,
name: name.local_name,
attributes: attr_map,
..Element::default()
};
child.parse(&mut reader)?;
self.children.push(child);
}
XmlEvent::EndElement { name } => {
if name.prefix == self.prefix && name.local_name == self.name {
return Ok(());
} else {
// This should never happen, since the base xml library will panic first
panic!("Unexpected closing tag: {}, expected {}", name, self.name);
}
}
XmlEvent::Characters(s) => {
let text = match self.text {
Some(ref v) => v.clone(),
None => String::new(),
};
self.text = Some(text + &s);
}
XmlEvent::CData(s) => {
let cdata = match self.cdata {
Some(ref v) => v.clone(),
None => String::new(),
};
self.cdata = Some(cdata + &s);
}
XmlEvent::StartDocument { .. }
| XmlEvent::EndDocument
| XmlEvent::ProcessingInstruction { .. }
| XmlEvent::Whitespace(_)
| XmlEvent::Comment(_) => {}
}
}
}
/// Write an element and its contents to `writer`
fn write<W: Write>(&self, writer: &mut xml::writer::EventWriter<W>) -> Result<(), Error> {
use xml::attribute::Attribute;
use xml::name::Name;
use xml::namespace::Namespace;
use xml::writer::XmlEvent;
let name = Name::local(&self.name);
let mut attributes = Vec::with_capacity(self.attributes.len());
for (k, v) in &self.attributes {
attributes.push(Attribute {
name: Name::local(k),
value: v,
});
}
let namespace = Namespace::empty();
writer.write(XmlEvent::StartElement {
name: name,
attributes: Cow::Owned(attributes),
namespace: Cow::Owned(namespace),
})?;
if let Some(ref text) = self.text {
writer.write(XmlEvent::Characters(&text[..]))?;
}
if let Some(ref cdata) = self.cdata {
writer.write(XmlEvent::CData(&cdata[..]))?;
}
for e in &self.children {
e.write(writer)?;
}
writer.write(XmlEvent::EndElement { name: Some(name) })?;
Ok(())
}
/// Find a single child of the current `Element`, given a predicate
pub fn find_child<P>(&self, predicate: P) -> Option<&Element>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().find(predicate)
}
/// Find a single child of the current `Element`, given a predicate; returns a mutable borrow
pub fn find_child_mut<P>(&mut self, predicate: P) -> Option<&mut Element>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().find(predicate)
}
/// Traverse element using an xpath-like string: root/child/a
pub fn find(&self, path: &str) -> Result<&Element, Error> {
Self::find_path(&path.split('/').collect::<Vec<&str>>(), path, self)
}
pub fn find_value<T: FromStr>(&self, path: &str) -> Result<Option<T>, Error> {
let el = self.find(path)?;
if let Some(text) = el.text.as_ref() {
match T::from_str(text) {
Err(_) => Err(errors::Error::ValueFromStr {
t: text.to_string(),
}.into()),
Ok(value) => Ok(Some(value)),
}
} else {
Ok(None)
}
}
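/// Illustrative usage sketch (not part of the original file): building a small
/// tree by hand and reading a typed value back out with `find_value`. Assumes
/// this crate's `Error` type is in scope via the re-exports above.
fn find_value_demo() -> Result<(), Error> {
    let mut leaf = Element::new("b");
    leaf.text = Some("42".to_owned());
    let mut mid = Element::new("a");
    mid.children.push(leaf);
    let mut root = Element::new("root");
    root.children.push(mid);
    let n: Option<i32> = root.find_value("a/b")?;
    assert_eq!(n, Some(42));
    Ok(())
}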
fn find_path<'a>(
path: &[&str],
original: &str,
tree: &'a Element,
) -> Result<&'a Element, Error> {
if path.is_empty() {
return Ok(tree);
}
match tree.find_child(|t| t.name == path[0]) {
Some(element) => Self::find_path(&path[1..], original, element),
None => Err(errors::Error::ElementNotFound { t: original.into() }.into()),
}
}
/// Filters the children of the current `Element`, given a predicate
pub fn filter_children<P>(&self, predicate: P) -> Filter<Iter<Element>, P>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().filter(predicate)
}
/// Filters the children of the current `Element`, given a predicate; returns a mutable iterator
pub fn filter_children_mut<P>(&mut self, predicate: P) -> Filter<IterMut<Element>, P>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().filter(predicate)
}
}
impl fmt::Display for Element {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let doc = Document {
root: Some(self.clone()),
..Document::default()
};
let mut v = Vec::<u8>::new();
doc.write_with(&mut v, false, " ", true).unwrap();
let s = String::from_utf8(v).unwrap();
f.write_str(&s[..])
}
}
/// An XML document
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Document {
/// Version of the XML document
pub version: XmlVersion,
/// Encoding of the XML document
pub encoding: String,
/// Root tag of the XML document
pub root: Option<Element>,
}
impl Default for Document {
fn default() -> Self {
Document {
version: XmlVersion::Version10,
encoding: "UTF-8".to_owned(),
root: None,
}
}
}
impl Document {
/// Create a new `Document` with default values
pub fn new() -> Document {
Document {
..Document::default()
}
}
/// Create a new `Document` |
pub use errors::*;
pub use builder::*; | random_line_split |
lib.rs | extern crate indexmap;
mod errors;
extern crate xml;
mod builder;
use std::borrow::Cow;
use std::fmt;
use std::io::{Read, Write};
use std::iter::Filter;
use std::slice::{Iter, IterMut};
use std::str::FromStr;
use std::string::ToString;
pub use errors::*;
pub use builder::*;
use indexmap::IndexMap;
use xml::common::XmlVersion as BaseXmlVersion;
/// Enumeration of XML versions
///
/// This exists solely because `xml-rs`'s `XmlVersion` doesn't implement Debug
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum XmlVersion {
/// XML Version 1.0
Version10,
/// XML Version 1.1
Version11,
}
impl From<BaseXmlVersion> for XmlVersion {
fn from(value: BaseXmlVersion) -> XmlVersion {
match value {
BaseXmlVersion::Version10 => XmlVersion::Version10,
BaseXmlVersion::Version11 => XmlVersion::Version11,
}
}
}
impl From<XmlVersion> for BaseXmlVersion {
fn | (value: XmlVersion) -> BaseXmlVersion {
match value {
XmlVersion::Version10 => BaseXmlVersion::Version10,
XmlVersion::Version11 => BaseXmlVersion::Version11,
}
}
}
/// An XML element
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Element {
/// Tag prefix, used for namespacing: `xsl` in `xsl:for-each`
pub prefix: Option<String>,
/// Tag name: `for-each` in `xsl:for-each`
pub name: String,
/// Tag attributes
pub attributes: IndexMap<String, String>,
/// A vector of child elements
pub children: Vec<Element>,
/// Contents of the element
pub text: Option<String>,
/// CDATA contents of the element
pub cdata: Option<String>,
}
impl Default for Element {
fn default() -> Self {
Element {
prefix: None,
name: "tag".to_owned(),
attributes: IndexMap::new(),
children: Vec::new(),
text: None,
cdata: None,
}
}
}
impl Element {
/// Create a new `Element` with the tag name `name`
pub fn new<S>(name: S) -> Element
where
S: ToString,
{
Element {
name: name.to_string(),
..Element::default()
}
}
/// Parse the contents of an element
fn parse<R: Read>(
&mut self,
mut reader: &mut xml::reader::EventReader<R>,
) -> Result<(), Error> {
use xml::reader::XmlEvent;
loop {
let ev = reader.next()?;
match ev {
XmlEvent::StartElement {
name, attributes, ..
} => {
let mut attr_map = IndexMap::new();
for attr in attributes {
let attr_name = match attr.name.prefix {
Some(prefix) => format!("{}:{}", prefix, attr.name.local_name),
None => attr.name.local_name,
};
attr_map.insert(attr_name, attr.value);
}
let mut child = Element {
prefix: name.prefix,
name: name.local_name,
attributes: attr_map,
..Element::default()
};
child.parse(&mut reader)?;
self.children.push(child);
}
XmlEvent::EndElement { name } => {
if name.prefix == self.prefix && name.local_name == self.name {
return Ok(());
} else {
// This should never happen, since the base xml library will panic first
panic!("Unexpected closing tag: {}, expected {}", name, self.name);
}
}
XmlEvent::Characters(s) => {
let text = match self.text {
Some(ref v) => v.clone(),
None => String::new(),
};
self.text = Some(text + &s);
}
XmlEvent::CData(s) => {
let cdata = match self.cdata {
Some(ref v) => v.clone(),
None => String::new(),
};
self.cdata = Some(cdata + &s);
}
XmlEvent::StartDocument { .. }
| XmlEvent::EndDocument
| XmlEvent::ProcessingInstruction { .. }
| XmlEvent::Whitespace(_)
| XmlEvent::Comment(_) => {}
}
}
}
/// Write an element and its contents to `writer`
fn write<W: Write>(&self, writer: &mut xml::writer::EventWriter<W>) -> Result<(), Error> {
use xml::attribute::Attribute;
use xml::name::Name;
use xml::namespace::Namespace;
use xml::writer::XmlEvent;
let name = Name::local(&self.name);
let mut attributes = Vec::with_capacity(self.attributes.len());
for (k, v) in &self.attributes {
attributes.push(Attribute {
name: Name::local(k),
value: v,
});
}
let namespace = Namespace::empty();
writer.write(XmlEvent::StartElement {
name: name,
attributes: Cow::Owned(attributes),
namespace: Cow::Owned(namespace),
})?;
if let Some(ref text) = self.text {
writer.write(XmlEvent::Characters(&text[..]))?;
}
if let Some(ref cdata) = self.cdata {
writer.write(XmlEvent::CData(&cdata[..]))?;
}
for e in &self.children {
e.write(writer)?;
}
writer.write(XmlEvent::EndElement { name: Some(name) })?;
Ok(())
}
/// Find a single child of the current `Element`, given a predicate
pub fn find_child<P>(&self, predicate: P) -> Option<&Element>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().find(predicate)
}
/// Find a single child of the current `Element`, given a predicate; returns a mutable borrow
pub fn find_child_mut<P>(&mut self, predicate: P) -> Option<&mut Element>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().find(predicate)
}
/// Traverse element using an xpath-like string: root/child/a
pub fn find(&self, path: &str) -> Result<&Element, Error> {
Self::find_path(&path.split('/').collect::<Vec<&str>>(), path, self)
}
pub fn find_value<T: FromStr>(&self, path: &str) -> Result<Option<T>, Error> {
let el = self.find(path)?;
if let Some(text) = el.text.as_ref() {
match T::from_str(text) {
Err(_) => Err(errors::Error::ValueFromStr {
t: text.to_string(),
}.into()),
Ok(value) => Ok(Some(value)),
}
} else {
Ok(None)
}
}
fn find_path<'a>(
path: &[&str],
original: &str,
tree: &'a Element,
) -> Result<&'a Element, Error> {
if path.is_empty() {
return Ok(tree);
}
match tree.find_child(|t| t.name == path[0]) {
Some(element) => Self::find_path(&path[1..], original, element),
None => Err(errors::Error::ElementNotFound { t: original.into() }.into()),
}
}
/// Filters the children of the current `Element`, given a predicate
pub fn filter_children<P>(&self, predicate: P) -> Filter<Iter<Element>, P>
where
P: for<'r> Fn(&'r &Element) -> bool,
{
self.children.iter().filter(predicate)
}
/// Filters the children of the current `Element`, given a predicate; returns a mutable iterator
pub fn filter_children_mut<P>(&mut self, predicate: P) -> Filter<IterMut<Element>, P>
where
P: for<'r> FnMut(&'r &mut Element) -> bool,
{
self.children.iter_mut().filter(predicate)
}
}
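// Illustrative sketch (not in the original): filtering children by tag name.
// Assumes an `Element` whose children were built or parsed elsewhere.
fn filter_children_demo(root: &Element) {
    let items: Vec<&Element> = root
        .filter_children(|child| child.name == "item")
        .collect();
    println!("found {} <item> children", items.len());
}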
impl fmt::Display for Element {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let doc = Document {
root: Some(self.clone()),
..Document::default()
};
let mut v = Vec::<u8>::new();
doc.write_with(&mut v, false, " ", true).unwrap();
let s = String::from_utf8(v).unwrap();
f.write_str(&s[..])
}
}
/// An XML document
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Document {
/// Version of the XML document
pub version: XmlVersion,
/// Encoding of the XML document
pub encoding: String,
/// Root tag of the XML document
pub root: Option<Element>,
}
impl Default for Document {
fn default() -> Self {
Document {
version: XmlVersion::Version10,
encoding: "UTF-8".to_owned(),
root: None,
}
}
}
impl Document {
/// Create a new `Document` with default values
pub fn new() -> Document {
Document {
..Document::default()
}
}
/// Create a new `Document` with | from | identifier_name |
metamandering_north_carolina.py | from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def | (partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
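# Illustrative driver (added for clarity): how this sampler might be invoked.
# The file name and district count below are assumptions, not values from this script.
def example_find_manders():
    graph = Graph.from_json("NC_VTD.json")  # hypothetical precinct dual-graph file
    return produce_gerrymanders(graph, 13, "_nc", sample_size=100, chaintype="tree")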
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['population'] = 0
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(g_sierpinsky, range(num_dist), total_pop/num_dist, "population", .01, 1)
for node in g_sierpinsky.nodes():
g_sierpinsky.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k
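# Illustrative end-to-end sketch (assumptions flagged inline): metamander around
# the Republican-favoring plan found above, then sample on the refined graph.
def example_pipeline(graph, dual):
    # `dual` is assumed to be the restricted planar dual of `graph`, produced
    # elsewhere (e.g. by a facefinder helper not shown in this file).
    left_mander, right_mander = produce_gerrymanders(graph, 13, "_nc", 100, "tree")
    g_sierpinsky, k = metamander_around_partition(graph, dual, right_mander, "_nc_R", 13)
    produce_sample(g_sierpinsky, k, "_nc_R_meta", sample_size=500, chaintype="tree")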
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
z = 0
num_cuts_list = []
seats_won_table = []
best_left = np.inf
best_right = -np.inf
print("begin chain")
for part in exp_chain:
z += 1
if z % 100 == 0:
| step_num | identifier_name |
metamandering_north_carolina.py | from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
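# mean_median and efficiency_gap are imported above but never used; a minimal
# sketch of wiring them in through gerrychain's Election updater (the election
# name and party labels here are assumptions):
def example_partition_scores(graph, assignment):
    election = Election("PRES16", {"Dem": "EL16G_PR_D", "Rep": "EL16G_PR_R"})
    part = Partition(graph, assignment, updaters={"PRES16": election})
    return mean_median(part["PRES16"]), efficiency_gap(part["PRES16"])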
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
|
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(g_sierpinsky, range(num_dist), total_pop/num_dist, "population", .01, 1)
for node in g_sierpinsky.nodes():
g_sierpinsky.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
z = 0
num_cuts_list = []
seats_won_table = []
best_left = np.inf
best_right = -np.inf
print("begin chain")
for part in exp_chain:
z += 1
if z % 100 == 0:
| g_sierpinsky.nodes[node]['population'] = 0 | conditional_block |
metamandering_north_carolina.py | import numpy as np
import copy
from gerrychain.tree import bipartition_tree as bpt
from gerrychain import Graph
from gerrychain import MarkovChain
from gerrychain.constraints import (Validator, single_flip_contiguous,
within_percent_of_ideal_population, UpperBound)
from gerrychain.proposals import propose_random_flip, propose_chunk_flip
from gerrychain.accept import always_accept
from gerrychain.updaters import Election, Tally, cut_edges
from gerrychain import GeographicPartition
from gerrychain.partition import Partition
from gerrychain.proposals import recom
from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
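# The per-district tally above is repeated in several loops; a small helper
# (illustrative refactor, not part of the original) that computes Republican
# seats for one partition:
def republican_seats(graph, part, k):
    seats = 0
    for i in range(k):
        rep = sum(graph.nodes[n]["EL16G_PR_R"] for n in graph.nodes() if part.assignment[n] == i)
        dem = sum(graph.nodes[n]["EL16G_PR_D"] for n in graph.nodes() if part.assignment[n] == i)
        seats += int(rep > dem)
    return seats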
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['population'] = 0
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(g_sierpinsky, range(num_dist), total_pop/num_dist, "population", .01, 1)
for node in g_sierpinsky.nodes():
g_sierpinsky.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bip | import seaborn as sns
from functools import partial
import networkx as nx | random_line_split |
|
metamandering_north_carolina.py | from gerrychain.metrics import mean_median, efficiency_gap
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, contract_leaves_until_balanced_or_none, find_balanced_edge_cuts
def step_num(partition):
parent = partition.parent
if not parent:
return 0
return parent["step_num"] + 1
def always_true(proposal):
return True
def produce_gerrymanders(graph, k, tag, sample_size, chaintype):
#Samples k partitions of the graph
#stores vote histograms, and returns most extreme partitions.
for n in graph.nodes():
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
seats_won_table = []
best_left = np.inf
best_right = -np.inf
ctr = 0
for part in exp_chain:
ctr += 1
seats_won = 0
if ctr % 100 == 0:
print("step ", ctr)
for i in range(k):
rep_votes = 0
dem_votes = 0
for n in graph.nodes():
if part.assignment[n] == i:
rep_votes += graph.nodes[n]["EL16G_PR_R"]
dem_votes += graph.nodes[n]["EL16G_PR_D"]
total_seats = int(rep_votes > dem_votes)
seats_won += total_seats
#total seats won by rep
seats_won_table.append(seats_won)
# save gerrymandered partitions
if seats_won < best_left:
best_left = seats_won
left_mander = copy.deepcopy(part.parts)
if seats_won > best_right:
best_right = seats_won
right_mander = copy.deepcopy(part.parts)
#print("finished round"
print("max", best_right, "min:", best_left)
#plt.figure()
#plt.hist(seats_won_table, bins = 10)
name = "./plots/seats_histogram" + tag +".png"
#plt.savefig(name)
#plt.close()
sns_plot = sns.distplot(seats_won_table, label="North Carolina Republican Vote Distribution").get_figure()
plt.legend()
sns_plot.savefig(name)
return left_mander, right_mander
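# Note on the snapshots above: copy.deepcopy(part.parts) copies whole node sets
# on every new extreme. Storing the flat node-to-district assignment is a
# lighter alternative (illustrative sketch, not part of the original):
def snapshot_assignment(part):
    return {node: part.assignment[node] for node in part.graph.nodes()}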
def assign_special_faces(graph, k):
special_faces = []
for node in graph.nodes():
if graph.nodes[node]['distance'] >= k:
special_faces.append(node)
return special_faces
def metamander_around_partition(graph, dual, target_partition, tag,num_dist):
| print("made partition")
crosses = compute_cross_edge(graph, target_partition)
k = len(target_partition.parts)
dual_crosses = []
for edge in dual.edges:
if dual.edges[edge]["original_name"] in crosses:
dual_crosses.append(edge)
print("making dual distances")
dual = distance_from_partition(dual, dual_crosses)
print('finished making dual distances')
special_faces = assign_special_faces(dual,2)
print('finished assigning special faces')
g_sierpinsky = face_sierpinski_mesh(graph, special_faces)
print("made metamander")
# change from RVAP and UVAP to appropriate election data columns
for node in g_sierpinsky:
g_sierpinsky.nodes[node]['C_X'] = g_sierpinsky.nodes[node]['pos'][0]
g_sierpinsky.nodes[node]['C_Y'] = g_sierpinsky.nodes[node]['pos'][1]
if 'population' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['population'] = 0
if 'EL16G_PR_D' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_D'] = 0
if 'EL16G_PR_R' not in g_sierpinsky.nodes[node]:
g_sierpinsky.nodes[node]['EL16G_PR_R'] = 0
##Need to add the voting data
# Seanna: So it looks like it initializes population and voting data to 0 when creating a new node? Does that matter?
print("assigning districts to metamander")
total_pop = sum( [ g_sierpinsky.nodes[node]['population'] for node in g_sierpinsky])
cddict = recursive_tree_part(g_sierpinsky, range(num_dist), total_pop/num_dist, "population", .01, 1)
for node in g_sierpinsky.nodes():
g_sierpinsky.nodes[node]['part'] = cddict[node]
#sierp_partition = build_trivial_partition(g_sierpinsky)
print("assigned districts")
plt.figure()
nx.draw(g_sierpinsky, pos=nx.get_node_attributes(g_sierpinsky, 'pos'), node_size = 1, width = 1, cmap=plt.get_cmap('jet'))
plt.title("North Carolina Metamander")
plt.savefig("./plots/sierpinsky_mesh.png", format='png')
plt.close()
return g_sierpinsky, k
def produce_sample(graph, k, tag, sample_size = 500, chaintype='tree'):
#Samples k partitions of the graph, stores the cut edges and records them graphically
#Also stores vote histograms, and returns most extreme partitions.
print("producing sample")
updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
for edge in graph.edges():
graph[edge[0]][edge[1]]['cut_times'] = 0
for n in graph.nodes():
#graph.nodes[n]["population"] = 1 #graph.nodes[n]["POP10"] #This is something gerrychain will refer to for checking population balance
graph.nodes[n]["last_flipped"] = 0
graph.nodes[n]["num_flips"] = 0
print("set up chain")
ideal_population= sum( graph.nodes[x]["population"] for x in graph.nodes())/k
initial_partition = Partition(graph, assignment='part', updaters=updaters)
pop1 = .05
print("popbound")
popbound = within_percent_of_ideal_population(initial_partition, pop1)
if chaintype == "tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_mst_bipartition_tree_random)
elif chaintype == "uniform_tree":
tree_proposal = partial(recom, pop_col="population", pop_target=ideal_population, epsilon=pop1,
node_repeats=1, method=my_uu_bipartition_tree_random)
else:
print("Chaintype used: ", chaintype)
raise RuntimeError("Chaintype not recongized. Use 'tree' or 'uniform_tree' instead")
exp_chain = MarkovChain(tree_proposal, Validator([popbound]), accept=always_true, initial_state=initial_partition, total_steps=sample_size)
z = 0
num_cuts_list = []
seats_won_table = []
best_left = np.inf
best_right = -np.inf
print("begin chain")
for part in exp_chain:
z += 1
if z % 100 == 0:
| updaters = {'population': Tally('population'),
'cut_edges': cut_edges,
'step_num': step_num,
}
assignment = {}
for x in graph.nodes():
color = 0
for block in target_partition.keys():
if x in target_partition[block]:
assignment[x] = color
color += 1
target_partition = Partition(graph, assignment, updaters = updaters)
plt.figure()
viz(graph, set([]), target_partition.parts)
plt.savefig("./plots/target_map" + tag + ".png", format = 'png')
plt.close()
| identifier_body |
component.rs | .iter()
.filter_map(|init| match init {
GlobalInitializer::AlwaysTrap(i) => Some(i),
_ => None,
})
.collect();
let always_trap = engine.run_maybe_parallel(always_trap, |info| {
compiler
.component_compiler()
.compile_always_trap(&types[info.canonical_abi])
})?;
// Compile all "lowerings" which are adapters that go from core wasm
// into the host which will process the canonical ABI.
let lowerings = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i),
_ => None,
})
.collect();
let lowerings = engine.run_maybe_parallel(lowerings, |lowering| {
compiler
.component_compiler()
.compile_lowered_trampoline(&component, lowering, &types)
})?;
// Collect the results of all of the function-based compilations above
// into one large list of functions to get appended into the text
// section of the final module.
let mut funcs = Vec::new();
let mut module_func_start_index = Vec::new();
let mut func_index_to_module_index = Vec::new();
let mut func_infos = Vec::new();
for (i, list) in module_funcs.into_iter().enumerate() {
module_func_start_index.push(func_index_to_module_index.len());
let mut infos = Vec::new();
for (j, (info, func)) in list.into_iter().enumerate() {
func_index_to_module_index.push(i);
let name = format!("_wasm{i}_function{j}");
funcs.push((name, func));
infos.push(info);
}
func_infos.push(infos);
}
for (sig, func) in trampolines.iter().zip(compiled_trampolines) {
let name = format!("_wasm_trampoline{}", sig.as_u32());
funcs.push((name, func));
}
let ntranscoders = transcoders.len();
for (i, func) in transcoders.into_iter().enumerate() {
let name = format!("_wasm_component_transcoder{i}");
funcs.push((name, func));
}
let nalways_trap = always_trap.len();
for (i, func) in always_trap.into_iter().enumerate() {
let name = format!("_wasm_component_always_trap{i}");
funcs.push((name, func));
}
let nlowerings = lowerings.len();
for (i, func) in lowerings.into_iter().enumerate() {
let name = format!("_wasm_component_lowering{i}");
funcs.push((name, func));
}
let mut object = compiler.object(ObjectKind::Component)?;
let locs = compiler.append_code(&mut object, &funcs, tunables, &|i, idx| {
// Map from the `i`th function which is requesting the relocation to
// the index in `modules` that the function belongs to. Using that
// metadata we can resolve `idx: FuncIndex` to a `DefinedFuncIndex`
// to the index of that module's function that's being called.
//
// Note that this will panic if `i` is a function beyond the initial
// set of core wasm module functions. That's intentional, however,
// since trampolines and otherwise should not have relocations to
// resolve.
let module_index = func_index_to_module_index[i];
let defined_index = modules[StaticModuleIndex::new(module_index)]
.module
.defined_func_index(idx)
.unwrap();
// Additionally use the module index to determine where that
// module's list of functions started at to factor in as an offset
// as well.
let offset = module_func_start_index[module_index];
defined_index.index() + offset
})?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
// Disassemble the result of the appending to the text section, where
// each function is in the module, into respective maps.
let mut locs = locs.into_iter().map(|(_sym, loc)| loc);
let funcs = func_infos
.into_iter()
.map(|infos| {
infos
.into_iter()
.zip(&mut locs)
.collect::<PrimaryMap<_, _>>()
})
.collect::<Vec<_>>();
let signature_to_trampoline = trampolines
.iter()
.cloned()
.zip(&mut locs)
.collect::<HashMap<_, _>>();
let transcoders = locs
.by_ref()
.take(ntranscoders)
.collect::<PrimaryMap<RuntimeTranscoderIndex, _>>();
let always_trap = locs
.by_ref()
.take(nalways_trap)
.collect::<PrimaryMap<RuntimeAlwaysTrapIndex, _>>();
let lowerings = locs
.by_ref()
.take(nlowerings)
.collect::<PrimaryMap<LoweredIndex, _>>();
assert!(locs.next().is_none());
// Convert all `ModuleTranslation` instances into `CompiledModuleInfo`
// through an `ObjectBuilder` here. This is then used to create the
// final `mmap` which is the final compilation artifact.
let mut builder = wasmtime_jit::ObjectBuilder::new(object, tunables);
let mut static_modules = PrimaryMap::new();
for ((_, module), funcs) in modules.into_iter().zip(funcs) {
// Build the list of trampolines for this module from its set of
// exported signatures, which is the list of expected trampolines,
// from the set of trampolines that were compiled for everything
// within this component.
let trampolines = module
.exported_signatures
.iter()
.map(|sig| (*sig, signature_to_trampoline[sig]))
.collect();
let info = builder.append(module, funcs, trampolines)?;
static_modules.push(info);
}
let info = CompiledComponentInfo {
always_trap,
component,
lowerings,
trampolines: trampolines
.difference(&module_trampolines)
.map(|i| (*i, signature_to_trampoline[i]))
.collect(),
transcoders,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules,
};
builder.serialize_info(&artifacts);
let mmap = builder.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures = SignatureCollection::new_for_module(
engine.signatures(),
types.module_types(),
static_modules
.iter()
.flat_map(|(_, m)| m.trampolines.iter().copied())
.chain(info.trampolines.iter().copied())
.map(|(sig, loc)| {
let trampoline = code_memory.text()[loc.start as usize..].as_ptr();
(sig, unsafe {
mem::transmute::<*const u8, VMTrampoline>(trampoline)
})
}),
);
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> | {
match self.inner.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
} | identifier_body |
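The `mem::transmute` inside `from_parts` above is the step that turns stored data back into code: each trampoline is recorded only as an offset into the mapped text section, and casting that address to a function-pointer type is what makes it callable again. A minimal sketch of the idea, using a made-up `Trampoline` signature in place of wasmtime's actual `VMTrampoline`:

use std::mem;

// Hypothetical function-pointer type; the real VMTrampoline takes vmctx
// and argument-buffer parameters, this only illustrates the cast.
type Trampoline = unsafe extern "C" fn();

fn trampoline_at(text: &[u8], start: usize) -> Trampoline {
    // Sound only if `text[start..]` really is machine code for a
    // function of this exact signature, which the loader must guarantee.
    let ptr = text[start..].as_ptr();
    unsafe { mem::transmute::<*const u8, Trampoline>(ptr) }
}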
|
component.rs |
.iter()
.filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i),
_ => None,
})
.collect();
let lowerings = engine.run_maybe_parallel(lowerings, |lowering| {
compiler
.component_compiler()
.compile_lowered_trampoline(&component, lowering, &types)
})?;
// Collect the results of all of the function-based compilations above
// into one large list of functions to get appended into the text
// section of the final module.
let mut funcs = Vec::new();
let mut module_func_start_index = Vec::new();
let mut func_index_to_module_index = Vec::new();
let mut func_infos = Vec::new();
for (i, list) in module_funcs.into_iter().enumerate() {
module_func_start_index.push(func_index_to_module_index.len());
let mut infos = Vec::new();
for (j, (info, func)) in list.into_iter().enumerate() {
func_index_to_module_index.push(i);
let name = format!("_wasm{i}_function{j}");
funcs.push((name, func));
infos.push(info);
}
func_infos.push(infos);
}
for (sig, func) in trampolines.iter().zip(compiled_trampolines) {
let name = format!("_wasm_trampoline{}", sig.as_u32());
funcs.push((name, func));
}
let ntranscoders = transcoders.len();
for (i, func) in transcoders.into_iter().enumerate() {
let name = format!("_wasm_component_transcoder{i}");
funcs.push((name, func));
}
let nalways_trap = always_trap.len();
for (i, func) in always_trap.into_iter().enumerate() {
let name = format!("_wasm_component_always_trap{i}");
funcs.push((name, func));
}
let nlowerings = lowerings.len();
for (i, func) in lowerings.into_iter().enumerate() {
let name = format!("_wasm_component_lowering{i}");
funcs.push((name, func));
}
let mut object = compiler.object(ObjectKind::Component)?;
let locs = compiler.append_code(&mut object, &funcs, tunables, &|i, idx| {
// Map from the `i`th function which is requesting the relocation to
// the index in `modules` that the function belongs to. Using that
// metadata we can resolve `idx: FuncIndex` to a `DefinedFuncIndex`
// to the index of that module's function that's being called.
//
// Note that this will panic if `i` is a function beyond the initial
// set of core wasm module functions. That's intentional, however,
// since trampolines and otherwise should not have relocations to
// resolve.
let module_index = func_index_to_module_index[i];
let defined_index = modules[StaticModuleIndex::new(module_index)]
.module
.defined_func_index(idx)
.unwrap();
// Additionally use the module index to determine where that
// module's list of functions started at to factor in as an offset
// as well.
let offset = module_func_start_index[module_index];
defined_index.index() + offset
})?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
// Split the locations returned from appending to the text section,
// which record where each function in each module landed, into the
// respective per-category maps.
let mut locs = locs.into_iter().map(|(_sym, loc)| loc);
let funcs = func_infos
.into_iter()
.map(|infos| {
infos
.into_iter()
.zip(&mut locs)
.collect::<PrimaryMap<_, _>>()
})
.collect::<Vec<_>>();
let signature_to_trampoline = trampolines
.iter()
.cloned()
.zip(&mut locs)
.collect::<HashMap<_, _>>();
let transcoders = locs
.by_ref()
.take(ntranscoders)
.collect::<PrimaryMap<RuntimeTranscoderIndex, _>>();
let always_trap = locs
.by_ref()
.take(nalways_trap)
.collect::<PrimaryMap<RuntimeAlwaysTrapIndex, _>>();
let lowerings = locs
.by_ref()
.take(nlowerings)
.collect::<PrimaryMap<LoweredIndex, _>>();
assert!(locs.next().is_none());
// Convert all `ModuleTranslation` instances into `CompiledModuleInfo`
// through an `ObjectBuilder` here. This is then used to create the
// final `mmap` which is the final compilation artifact.
let mut builder = wasmtime_jit::ObjectBuilder::new(object, tunables);
let mut static_modules = PrimaryMap::new();
for ((_, module), funcs) in modules.into_iter().zip(funcs) {
// Build the list of trampolines for this module from its set of
// exported signatures, which is the list of expected trampolines,
// from the set of trampolines that were compiled for everything
// within this component.
let trampolines = module
.exported_signatures
.iter()
.map(|sig| (*sig, signature_to_trampoline[sig]))
.collect();
let info = builder.append(module, funcs, trampolines)?;
static_modules.push(info);
}
let info = CompiledComponentInfo {
always_trap,
component,
lowerings,
trampolines: trampolines
.difference(&module_trampolines)
.map(|i| (*i, signature_to_trampoline[i]))
.collect(),
transcoders,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules,
};
builder.serialize_info(&artifacts);
let mmap = builder.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures = SignatureCollection::new_for_module(
engine.signatures(),
types.module_types(),
static_modules
.iter()
.flat_map(|(_, m)| m.trampolines.iter().copied())
.chain(info.trampolines.iter().copied())
.map(|(sig, loc)| {
let trampoline = code_memory.text()[loc.start as usize..].as_ptr();
(sig, unsafe {
mem::transmute::<*const u8, VMTrampoline>(trampoline)
})
}),
);
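// Two trampoline sources feed this registration: each core module's own
// exported-signature trampolines and the component-level extras kept in
// `info.trampolines`, so both core wasm and component glue resolve
// against the same engine-wide signature table.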
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
match self.inner.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn text(&self) -> &[u8] {
self.inner.code.code_memory().text()
}
pub(crate) fn lowering_ptr(&self, index: LoweredIndex) -> NonNull<VMFunctionBody> { | let info = &self.inner.info.lowerings[index];
self.func(info)
}
pub(crate) fn always_trap_ptr(&self, index: RuntimeAlwaysTrapIndex) -> NonNull<VMFunctionBody> { | random_line_split |
|
component.rs | ,
})
.collect();
let always_trap = engine.run_maybe_parallel(always_trap, |info| {
compiler
.component_compiler()
.compile_always_trap(&types[info.canonical_abi])
})?;
// Compile all "lowerings" which are adapters that go from core wasm
// into the host which will process the canonical ABI.
let lowerings = component
.initializers
.iter()
.filter_map(|init| match init {
GlobalInitializer::LowerImport(i) => Some(i),
_ => None,
})
.collect();
let lowerings = engine.run_maybe_parallel(lowerings, |lowering| {
compiler
.component_compiler()
.compile_lowered_trampoline(&component, lowering, &types)
})?;
// Collect the results of all of the function-based compilations above
// into one large list of functions to get appended into the text
// section of the final module.
let mut funcs = Vec::new();
let mut module_func_start_index = Vec::new();
let mut func_index_to_module_index = Vec::new();
let mut func_infos = Vec::new();
for (i, list) in module_funcs.into_iter().enumerate() {
module_func_start_index.push(func_index_to_module_index.len());
let mut infos = Vec::new();
for (j, (info, func)) in list.into_iter().enumerate() {
func_index_to_module_index.push(i);
let name = format!("_wasm{i}_function{j}");
funcs.push((name, func));
infos.push(info);
}
func_infos.push(infos);
}
for (sig, func) in trampolines.iter().zip(compiled_trampolines) {
let name = format!("_wasm_trampoline{}", sig.as_u32());
funcs.push((name, func));
}
let ntranscoders = transcoders.len();
for (i, func) in transcoders.into_iter().enumerate() {
let name = format!("_wasm_component_transcoder{i}");
funcs.push((name, func));
}
let nalways_trap = always_trap.len();
for (i, func) in always_trap.into_iter().enumerate() {
let name = format!("_wasm_component_always_trap{i}");
funcs.push((name, func));
}
let nlowerings = lowerings.len();
for (i, func) in lowerings.into_iter().enumerate() {
let name = format!("_wasm_component_lowering{i}");
funcs.push((name, func));
}
let mut object = compiler.object(ObjectKind::Component)?;
let locs = compiler.append_code(&mut object, &funcs, tunables, &|i, idx| {
// Map from the `i`th function which is requesting the relocation to
// the index in `modules` that the function belongs to. Using that
// metadata we can resolve `idx: FuncIndex` to a `DefinedFuncIndex`
// to the index of that module's function that's being called.
//
// Note that this will panic if `i` is a function beyond the initial
// set of core wasm module functions. That's intentional, however,
// since trampolines and otherwise should not have relocations to
// resolve.
let module_index = func_index_to_module_index[i];
let defined_index = modules[StaticModuleIndex::new(module_index)]
.module
.defined_func_index(idx)
.unwrap();
// Additionally use the module index to determine where that
// module's list of functions started at to factor in as an offset
// as well.
let offset = module_func_start_index[module_index];
defined_index.index() + offset
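// Worked example: if two modules contribute 3 and 2 functions, then
// module_func_start_index is [0, 3]; a relocation in module 1 against
// its defined function 1 resolves to flat index 3 + 1 = 4 in `funcs`.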
})?;
engine.append_compiler_info(&mut object);
engine.append_bti(&mut object);
// Split the locations returned from appending to the text section,
// which record where each function in each module landed, into the
// respective per-category maps.
let mut locs = locs.into_iter().map(|(_sym, loc)| loc);
let funcs = func_infos
.into_iter()
.map(|infos| {
infos
.into_iter()
.zip(&mut locs)
.collect::<PrimaryMap<_, _>>()
})
.collect::<Vec<_>>();
let signature_to_trampoline = trampolines
.iter()
.cloned()
.zip(&mut locs)
.collect::<HashMap<_, _>>();
let transcoders = locs
.by_ref()
.take(ntranscoders)
.collect::<PrimaryMap<RuntimeTranscoderIndex, _>>();
let always_trap = locs
.by_ref()
.take(nalways_trap)
.collect::<PrimaryMap<RuntimeAlwaysTrapIndex, _>>();
let lowerings = locs
.by_ref()
.take(nlowerings)
.collect::<PrimaryMap<LoweredIndex, _>>();
assert!(locs.next().is_none());
// Convert all `ModuleTranslation` instances into `CompiledModuleInfo`
// through an `ObjectBuilder` here. This is then used to create the
// final `mmap` which is the final compilation artifact.
let mut builder = wasmtime_jit::ObjectBuilder::new(object, tunables);
let mut static_modules = PrimaryMap::new();
for ((_, module), funcs) in modules.into_iter().zip(funcs) {
// Build the list of trampolines for this module from its set of
// exported signatures, which is the list of expected trampolines,
// from the set of trampolines that were compiled for everything
// within this component.
let trampolines = module
.exported_signatures
.iter()
.map(|sig| (*sig, signature_to_trampoline[sig]))
.collect();
let info = builder.append(module, funcs, trampolines)?;
static_modules.push(info);
}
let info = CompiledComponentInfo {
always_trap,
component,
lowerings,
trampolines: trampolines
.difference(&module_trampolines)
.map(|i| (*i, signature_to_trampoline[i]))
.collect(),
transcoders,
};
let artifacts = ComponentArtifacts {
info,
types,
static_modules,
};
builder.serialize_info(&artifacts);
let mmap = builder.finish()?;
Ok((mmap, artifacts))
}
/// Final assembly step for a component from its in-memory representation.
///
/// If the `artifacts` are specified as `None` here then they will be
/// deserialized from `code_memory`.
fn from_parts(
engine: &Engine,
code_memory: Arc<CodeMemory>,
artifacts: Option<ComponentArtifacts>,
) -> Result<Component> {
let ComponentArtifacts {
info,
types,
static_modules,
} = match artifacts {
Some(artifacts) => artifacts,
None => bincode::deserialize(code_memory.wasmtime_info())?,
};
// Create a signature registration with the `Engine` for all trampolines
// and core wasm types found within this component, both for the
// component and for all included core wasm modules.
let signatures = SignatureCollection::new_for_module(
engine.signatures(),
types.module_types(),
static_modules
.iter()
.flat_map(|(_, m)| m.trampolines.iter().copied())
.chain(info.trampolines.iter().copied())
.map(|(sig, loc)| {
let trampoline = code_memory.text()[loc.start as usize..].as_ptr();
(sig, unsafe {
mem::transmute::<*const u8, VMTrampoline>(trampoline)
})
}),
);
// Assemble the `CodeObject` artifact which is shared by all core wasm
// modules as well as the final component.
let types = Arc::new(types);
let code = Arc::new(CodeObject::new(code_memory, signatures, types.into()));
// Convert all information about static core wasm modules into actual
// `Module` instances by converting each `CompiledModuleInfo`, the
// `types` type information, and the code memory to a runtime object.
let static_modules = static_modules
.into_iter()
.map(|(_, info)| Module::from_parts_raw(engine, code.clone(), info, false))
.collect::<Result<_>>()?;
Ok(Component {
inner: Arc::new(ComponentInner {
static_modules,
code,
info,
}),
})
}
pub(crate) fn env_component(&self) -> &wasmtime_environ::component::Component {
&self.inner.info.component
}
pub(crate) fn static_module(&self, idx: StaticModuleIndex) -> &Module {
&self.inner.static_modules[idx]
}
pub(crate) fn types(&self) -> &Arc<ComponentTypes> {
match self.inner.code.types() {
crate::code::Types::Component(types) => types,
// The only creator of a `Component` is itself which uses the other
// variant, so this shouldn't be possible.
crate::code::Types::Module(_) => unreachable!(),
}
}
pub(crate) fn signatures(&self) -> &SignatureCollection {
self.inner.code.signatures()
}
pub(crate) fn | text | identifier_name |
|
images.go | }
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil {
return true, nil
} else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
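// A note on the retry policy above: with Duration 1s, Factor 1.5, and
// Steps 5, wait.ExponentialBackoff sleeps roughly 1s, 1.5s, 2.25s, and
// 3.4s between attempts and gives up after the fifth try. The call
// shape, sketched with a hypothetical tryPush helper:
//
//	err := wait.ExponentialBackoff(wait.Backoff{
//		Duration: time.Second,
//		Factor:   1.5,
//		Steps:    5,
//	}, func() (bool, error) {
//		return tryPush(), nil // true stops retrying, false retries
//	})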
// Validate checks that image manifests have been pushed to the specified
// remote registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
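// The jq filter above runs against a standard manifest list, e.g.
// (abbreviated, with placeholder digests):
//
//	{"manifests": [
//	  {"digest": "sha256:aaa...", "platform": {"architecture": "amd64"}},
//	  {"digest": "sha256:bbb...", "platform": {"architecture": "arm64"}}
//	]}
//
// so invoking it with --arg a amd64 prints sha256:aaa... for the
// matching platform entry.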
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check than Validate in that it does not
// presuppose the existence of a local build directory. It is used in CI
// builds to quickly determine whether a build is actually required.
func (i *Images) Exists(registry, version string, fast bool) (bool, error) {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry, image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return false, fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return false, fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return false, fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return false, fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return false, fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return false, fmt.Errorf("remove manifest file: %w", err)
}
}
return true, nil
}
// GetManifestImages can be used to retrieve the map of built images and
// architectures.
func (i *Images) GetManifestImages(
registry, version, buildPath string,
forTarballFn func(path, origTag, newTagWithArch string) error,
) (map[string][]string, error) {
manifestImages := make(map[string][]string)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof("Getting manifest images in %s", releaseImagesPath)
archPaths, err := os.ReadDir(releaseImagesPath)
if err != nil {
return nil, fmt.Errorf("read images path %s: %w", releaseImagesPath, err)
}
for _, archPath := range archPaths {
arch := archPath.Name()
if !archPath.IsDir() {
logrus.Infof("Skipping %s because it's not a directory", arch)
continue
}
if err := filepath.Walk(
filepath.Join(releaseImagesPath, arch),
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
fileName := info.Name()
if !strings.HasSuffix(fileName, ".tar") {
logrus.Infof("Skipping non-tarball %s", fileName)
return nil
}
origTag, err := i.RepoTagFromTarball(path)
if err != nil {
return fmt.Errorf("getting repo tags for tarball: %w", err)
}
tagMatches := tagRegex.FindStringSubmatch(origTag)
if len(tagMatches) != 2 {
return fmt.Errorf(
"malformed tag %s in %s", origTag, path,
)
}
binary := tagMatches[1]
newTag := filepath.Join(
registry,
strings.TrimSuffix(binary, "-"+arch),
)
newTagWithArch := fmt.Sprintf("%s-%s:%s", newTag, arch, version)
manifestImages[newTag] = append(manifestImages[newTag], arch)
if forTarballFn != nil {
if err := forTarballFn(
path, origTag, newTagWithArch, | ); err != nil {
return fmt.Errorf("executing tarball callback: %w", err) | random_line_split |
|
images.go | return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return tagOutput.OutputTrimNL(), nil
}
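// Equivalent shell pipeline, for reference. An image tarball carries a
// manifest.json whose first entry lists the image's tags:
//
//	tar xf image.tar manifest.json -O | jq -r '.[0].RepoTags[0]'
//
// printing a reference such as k8s.gcr.io/kube-proxy-amd64:v1.26.0
// (an illustrative tag, not one taken from this build).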
func (*defaultImageImpl) SignImage(signer *sign.Signer, reference string) error {
_, err := signer.SignImage(reference)
return err
}
func (*defaultImageImpl) VerifyImage(_ *sign.Signer, _ string) error {
// TODO: bypassing this for now due to the failure in the promotion
// process that signs the images. We will release the Feb/2023 patch
// releases without full signatures, but we will sign those in the near
// future in a detached process. Revert this change when the patches are out.
// _, err := signer.VerifyImage(reference)
// return err
return nil
}
var tagRegex = regexp.MustCompile(`^.+/(.+):.+$`)
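// For "k8s.gcr.io/kube-proxy-amd64:v1.26.0" (an illustrative reference)
// the capture group above yields "kube-proxy-amd64"; GetManifestImages
// later strips the "-amd64" suffix to recover the bare binary name.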
// Publish releases container images to the provided target registry
func (i *Images) Publish(registry, version, buildPath string) error {
version = i.normalizeVersion(version)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof(
"Pushing container images from %s to registry %s",
releaseImagesPath, registry,
)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(path, origTag, newTagWithArch string) error {
if err := i.Execute(
"docker", "load", "-qi", path,
); err != nil {
return fmt.Errorf("load container image: %w", err)
}
if err := i.Execute(
"docker", "tag", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("tag container image: %w", err)
}
logrus.Infof("Pushing %s", newTagWithArch)
if err := i.Execute(
"gcloud", "docker", "--", "push", newTagWithArch,
); err != nil {
return fmt.Errorf("push container image: %w", err)
}
if err := i.SignImage(i.signer, newTagWithArch); err != nil {
return fmt.Errorf("sign container image: %w", err)
}
if err := i.Execute(
"docker", "rmi", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("remove local container image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
if err := os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled"); err != nil {
return fmt.Errorf("enable docker experimental CLI: %w", err)
}
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
logrus.Infof("Creating manifest image %s", imageVersion)
manifests := []string{}
for _, arch := range arches {
manifests = append(manifests,
fmt.Sprintf("%s-%s:%s", image, arch, version),
)
}
if err := i.Execute("docker", append(
[]string{"manifest", "create", "--amend", imageVersion},
manifests...,
)...); err != nil {
return fmt.Errorf("create manifest: %w", err)
}
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil {
return true, nil
} else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
// Validate checks that image manifests have been pushed to the specified
// remote registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check than Validate in that it does not
// presuppose the existence of a local build directory. It is used in CI
// builds to quickly determine whether a build is actually required.
func (i *Images) Exists(registry, version string, fast bool) (bool, error) | {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry, image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return false, fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
manifest := string(manifestBytes) | identifier_body |
|
images.go | return command.New(cmd, args...).RunSilentSuccess()
}
func (*defaultImageImpl) ExecuteOutput(cmd string, args ...string) (string, error) {
res, err := command.New(cmd, args...).RunSilentSuccessOutput()
if err != nil {
return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return tagOutput.OutputTrimNL(), nil
}
func (*defaultImageImpl) SignImage(signer *sign.Signer, reference string) error {
_, err := signer.SignImage(reference)
return err
}
func (*defaultImageImpl) VerifyImage(_ *sign.Signer, _ string) error {
// TODO: bypassing this for now due to the failure in the promotion
// process that signs the images. We will release the Feb/2023 patch
// releases without full signatures, but we will sign those in the near
// future in a detached process. Revert this change when the patches are out.
// _, err := signer.VerifyImage(reference)
// return err
return nil
}
var tagRegex = regexp.MustCompile(`^.+/(.+):.+$`)
// Publish releases container images to the provided target registry
func (i *Images) Publish(registry, version, buildPath string) error {
version = i.normalizeVersion(version)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof(
"Pushing container images from %s to registry %s",
releaseImagesPath, registry,
)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(path, origTag, newTagWithArch string) error {
if err := i.Execute(
"docker", "load", "-qi", path,
); err != nil {
return fmt.Errorf("load container image: %w", err)
}
if err := i.Execute(
"docker", "tag", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("tag container image: %w", err)
}
logrus.Infof("Pushing %s", newTagWithArch)
if err := i.Execute(
"gcloud", "docker", "--", "push", newTagWithArch,
); err != nil {
return fmt.Errorf("push container image: %w", err)
}
if err := i.SignImage(i.signer, newTagWithArch); err != nil {
return fmt.Errorf("sign container image: %w", err)
}
if err := i.Execute(
"docker", "rmi", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("remove local container image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
if err := os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled"); err != nil {
return fmt.Errorf("enable docker experimental CLI: %w", err)
}
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
logrus.Infof("Creating manifest image %s", imageVersion)
manifests := []string{}
for _, arch := range arches {
manifests = append(manifests,
fmt.Sprintf("%s-%s:%s", image, arch, version),
)
}
if err := i.Execute("docker", append(
[]string{"manifest", "create", "--amend", imageVersion},
manifests...,
)...); err != nil {
return fmt.Errorf("create manifest: %w", err)
}
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil | else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
// Validate checks that image manifests have been pushed to the specified
// remote registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check than Validate in that it does not
// presuppose the existence of a local build directory. It is used in CI
// builds to quickly determine whether a build is actually required.
func (i *Images) Exists(registry, version string, fast bool) (bool, error) {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages := ManifestImages
arches := SupportedArchitectures
if fast {
arches = FastArchitectures
}
for _, image := range manifestImages {
imageVersion := fmt.Sprintf("%s/%s:%s", registry | {
return true, nil
} | conditional_block |
images.go | (impl imageImpl) {
i.imageImpl = impl
}
// imageImpl is a client for working with container images.
//
//counterfeiter:generate . imageImpl
type imageImpl interface {
Execute(cmd string, args ...string) error
ExecuteOutput(cmd string, args ...string) (string, error)
RepoTagFromTarball(path string) (string, error)
SignImage(*sign.Signer, string) error
VerifyImage(*sign.Signer, string) error
}
type defaultImageImpl struct{}
func (*defaultImageImpl) Execute(cmd string, args ...string) error {
return command.New(cmd, args...).RunSilentSuccess()
}
func (*defaultImageImpl) ExecuteOutput(cmd string, args ...string) (string, error) {
res, err := command.New(cmd, args...).RunSilentSuccessOutput()
if err != nil {
return "", err
}
return res.OutputTrimNL(), nil
}
func (*defaultImageImpl) RepoTagFromTarball(path string) (string, error) {
tagOutput, err := command.
New("tar", "xf", path, "manifest.json", "-O").
Pipe("jq", "-r", ".[0].RepoTags[0]").
RunSilentSuccessOutput()
if err != nil {
return "", err
}
return tagOutput.OutputTrimNL(), nil
}
func (*defaultImageImpl) SignImage(signer *sign.Signer, reference string) error {
_, err := signer.SignImage(reference)
return err
}
func (*defaultImageImpl) VerifyImage(_ *sign.Signer, _ string) error {
// TODO: bypassing this for now due to the failure in the promotion
// process that signs the images. We will release the Feb/2023 patch
// releases without full signatures, but we will sign those in the near
// future in a detached process. Revert this change when the patches are out.
// _, err := signer.VerifyImage(reference)
// return err
return nil
}
var tagRegex = regexp.MustCompile(`^.+/(.+):.+$`)
// Publish releases container images to the provided target registry
func (i *Images) Publish(registry, version, buildPath string) error {
version = i.normalizeVersion(version)
releaseImagesPath := filepath.Join(buildPath, ImagesPath)
logrus.Infof(
"Pushing container images from %s to registry %s",
releaseImagesPath, registry,
)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(path, origTag, newTagWithArch string) error {
if err := i.Execute(
"docker", "load", "-qi", path,
); err != nil {
return fmt.Errorf("load container image: %w", err)
}
if err := i.Execute(
"docker", "tag", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("tag container image: %w", err)
}
logrus.Infof("Pushing %s", newTagWithArch)
if err := i.Execute(
"gcloud", "docker", "--", "push", newTagWithArch,
); err != nil {
return fmt.Errorf("push container image: %w", err)
}
if err := i.SignImage(i.signer, newTagWithArch); err != nil {
return fmt.Errorf("sign container image: %w", err)
}
if err := i.Execute(
"docker", "rmi", origTag, newTagWithArch,
); err != nil {
return fmt.Errorf("remove local container image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
if err := os.Setenv("DOCKER_CLI_EXPERIMENTAL", "enabled"); err != nil {
return fmt.Errorf("enable docker experimental CLI: %w", err)
}
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
logrus.Infof("Creating manifest image %s", imageVersion)
manifests := []string{}
for _, arch := range arches {
manifests = append(manifests,
fmt.Sprintf("%s-%s:%s", image, arch, version),
)
}
if err := i.Execute("docker", append(
[]string{"manifest", "create", "--amend", imageVersion},
manifests...,
)...); err != nil {
return fmt.Errorf("create manifest: %w", err)
}
for _, arch := range arches {
logrus.Infof(
"Annotating %s-%s:%s with --arch %s",
image, arch, version, arch,
)
if err := i.Execute(
"docker", "manifest", "annotate", "--arch", arch,
imageVersion, fmt.Sprintf("%s-%s:%s", image, arch, version),
); err != nil {
return fmt.Errorf("annotate manifest with arch: %w", err)
}
}
logrus.Infof("Pushing manifest image %s", imageVersion)
if err := wait.ExponentialBackoff(wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Steps: 5,
}, func() (bool, error) {
if err := i.Execute("docker", "manifest", "push", imageVersion, "--purge"); err == nil {
return true, nil
} else if strings.Contains(err.Error(), "request canceled while waiting for connection") {
// The error is unfortunately not exported:
// https://github.com/golang/go/blob/dc04f3b/src/net/http/client.go#L720
// https://github.com/golang/go/blob/dc04f3b/src/net/http/transport.go#L2518
// ref: https://github.com/kubernetes/release/issues/2810
logrus.Info("Retrying manifest push")
return false, nil
}
return false, err
}); err != nil {
return fmt.Errorf("push manifest: %w", err)
}
if err := i.SignImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("sign manifest list: %w", err)
}
}
return nil
}
// Validate checks that image manifests have been pushed to the specified
// remote registry.
func (i *Images) Validate(registry, version, buildPath string) error {
logrus.Infof("Validating image manifests in %s", registry)
version = i.normalizeVersion(version)
manifestImages, err := i.GetManifestImages(
registry, version, buildPath,
func(_, _, image string) error {
logrus.Infof("Verifying that image is signed: %s", image)
if err := i.VerifyImage(i.signer, image); err != nil {
return fmt.Errorf("verify signed image: %w", err)
}
return nil
},
)
if err != nil {
return fmt.Errorf("get manifest images: %w", err)
}
logrus.Infof("Got manifest images %+v", manifestImages)
for image, arches := range manifestImages {
imageVersion := fmt.Sprintf("%s:%s", image, version)
manifestBytes, err := crane.Manifest(imageVersion)
if err != nil {
return fmt.Errorf("get remote manifest from %s: %w", imageVersion, err)
}
logrus.Info("Verifying that image manifest list is signed")
if err := i.VerifyImage(i.signer, imageVersion); err != nil {
return fmt.Errorf("verify signed manifest list: %w", err)
}
manifest := string(manifestBytes)
manifestFile, err := os.CreateTemp("", "manifest-")
if err != nil {
return fmt.Errorf("create temp file for manifest: %w", err)
}
if _, err := manifestFile.WriteString(manifest); err != nil {
return fmt.Errorf("write manifest to %s: %w", manifestFile.Name(), err)
}
for _, arch := range arches {
logrus.Infof(
"Checking image digest for %s on %s architecture", image, arch,
)
digest, err := i.ExecuteOutput(
"jq", "--arg", "a", arch, "-r",
".manifests[] | select(.platform.architecture == $a) | .digest",
manifestFile.Name(),
)
if err != nil {
return fmt.Errorf("get digest from manifest file %s for arch %s: %w", manifestFile.Name(), arch, err)
}
if digest == "" {
return fmt.Errorf(
"could not find the image digest for %s on %s",
imageVersion, arch,
)
}
logrus.Infof("Digest for %s on %s: %s", imageVersion, arch, digest)
}
if err := os.RemoveAll(manifestFile.Name()); err != nil {
return fmt.Errorf("remove manifest file: %w", err)
}
}
return nil
}
// Exists verifies that a set of image manifests exists on a specified remote
// registry. This is a simpler check | SetImpl | identifier_name |
|
settings.go | PluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) String() string {
if s, ok := _genPluginTypeToString[g]; ok |
return strconv.Itoa(int(g))
}
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool {
return _genPluginTypeToIsGo[g]
}
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
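// Round-trip example: ParseGenPluginType("GoGo") lowercases its input
// and returns GenPluginTypeGogo, whose String() is "gogo"; an unknown
// name such as "swift" yields GenPluginTypeNone and a non-nil error.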
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs.
Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty | {
return s
} | conditional_block |
settings.go | enPluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) String() string {
if s, ok := _genPluginTypeToString[g]; ok {
return s
}
return strconv.Itoa(int(g))
}
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool {
return _genPluginTypeToIsGo[g]
}
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
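// Both Is* helpers lean on Go's map zero value: an unknown
// GenPluginType is simply absent from the lookup maps, so the access
// returns false instead of panicking, which is why validation can be
// deferred to ConfigProvider as noted above.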
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs. | Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty" yaml:"extra_modifiers,omitempty"`
| random_line_split
|
settings.go | PluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) String() string {
if s, ok := _genPluginTypeToString[g]; ok {
return s
}
return strconv.Itoa(int(g))
}
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool {
return _genPluginTypeToIsGo[g]
}
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
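// A minimal usage sketch of the helpers above (the error handling style is
// assumed):
//
//	t, err := ParseGenPluginType("GOGO") // case-insensitive -> GenPluginTypeGogo
//	if err != nil {
//		return err
//	}
//	if t.IsGogo() { // true here; t.IsGo() reports false
//		// wire up github.com/gogo/protobuf specific options
//	}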
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs.
Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty | {
return _genPluginTypeToIsGo[g]
} | identifier_body |
settings.go | PluginTypeNone GenPluginType = iota
// GenPluginTypeGo says the plugin is a Golang plugin that
// is or uses github.com/golang/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGo
// GenPluginTypeGogo says the plugin is a Golang plugin that
// is or uses github.com/gogo/protobuf.
// This will use GenGoPluginOptions.
GenPluginTypeGogo
)
var (
// ConfigFilenames are all possible config filenames.
ConfigFilenames = []string{
DefaultConfigFilename,
"prototool.json",
}
_genPluginTypeToString = map[GenPluginType]string{
GenPluginTypeNone: "",
GenPluginTypeGo: "go",
GenPluginTypeGogo: "gogo",
}
_stringToGenPluginType = map[string]GenPluginType{
"": GenPluginTypeNone,
"go": GenPluginTypeGo,
"gogo": GenPluginTypeGogo,
}
_genPluginTypeToIsGo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: true,
GenPluginTypeGogo: false,
}
_genPluginTypeToIsGogo = map[GenPluginType]bool{
GenPluginTypeNone: false,
GenPluginTypeGo: false,
GenPluginTypeGogo: true,
}
)
// GenPluginType is a type of protoc plugin.
type GenPluginType int
// String implements fmt.Stringer.
func (g GenPluginType) String() string {
if s, ok := _genPluginTypeToString[g]; ok {
return s
}
return strconv.Itoa(int(g))
}
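// Note: String and ParseGenPluginType round-trip for the known values, e.g.
// ParseGenPluginType(GenPluginTypeGogo.String()) yields GenPluginTypeGogo,
// while an unknown GenPluginType stringifies to its integer form via
// strconv.Itoa (which ParseGenPluginType will then reject).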
// The Is functions do not validate if the plugin type is known
// as this is supposed to be done in ConfigProvider.
// It's a lot easier if they just return a bool.
// IsGo returns true if the plugin type is associated with
// github.com/golang/protobuf.
func (g GenPluginType) IsGo() bool {
return _genPluginTypeToIsGo[g]
}
// IsGogo returns true if the plugin type is associated with
// github.com/gogo/protobuf.
func (g GenPluginType) IsGogo() bool {
return _genPluginTypeToIsGogo[g]
}
// ParseGenPluginType parses the GenPluginType from the given string.
//
// Input is case-insensitive.
func ParseGenPluginType(s string) (GenPluginType, error) {
genPluginType, ok := _stringToGenPluginType[strings.ToLower(s)]
if !ok {
return GenPluginTypeNone, fmt.Errorf("could not parse %s to a GenPluginType", s)
}
return genPluginType, nil
}
// Config is the main config.
//
// Configs are derived from ExternalConfigs, which represent the Config
// in a more palpable format for configuration via a config file
// or flags.
//
// String slices will be deduped and sorted if returned from this package.
// Configs will be validated if returned from this package.
//
// All paths returned should be absolute paths. Outside of this package,
// all other internal packages should verify that all given paths are
// absolute, except for the internal/text package.
type Config struct {
// The working directory path.
// Expected to be absolute path.
DirPath string
// The prefixes to exclude.
// Expected to be absolute paths.
// Expected to be unique.
ExcludePrefixes []string
// The compile config.
Compile CompileConfig
// The create config.
Create CreateConfig
// Lint is a special case. If nothing is set, the defaults are used. Either IDs,
// or Group/IncludeIDs/ExcludeIDs can be set, but not both. There can be no overlap
// between IncludeIDs and ExcludeIDs.
Lint LintConfig
// The gen config.
Gen GenConfig
}
// CompileConfig is the compile config.
type CompileConfig struct {
// The Protobuf version to use from https://github.com/protocolbuffers/protobuf/releases.
// Must have a valid protoc zip file asset, so for example 3.5.0 is a valid version
// but 3.5.0.1 is not.
ProtobufVersion string
// IncludePaths are the additional paths to include with -I to protoc.
// Expected to be absolute paths.
// Expected to be unique.
IncludePaths []string
// IncludeWellKnownTypes says to add the Google well-known types with -I to protoc.
IncludeWellKnownTypes bool
// AllowUnusedImports says to not error when an import is not used.
AllowUnusedImports bool
}
// CreateConfig is the create config.
type CreateConfig struct {
// The map from directory to the package to use as the base.
// Directories expected to be absolute paths.
DirPathToBasePackage map[string]string
}
// LintConfig is the lint config.
type LintConfig struct {
// NoDefault is set to exclude the default set of linters.
NoDefault bool
// IncludeIDs are the list of linter IDs to use in addition to the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with ExcludeIDs.
IncludeIDs []string
// ExcludeIDs are the list of linter IDs to exclude from the defaults.
// Expected to be all uppercase.
// Expected to be unique.
// Expected to have no overlap with IncludeIDs.
ExcludeIDs []string
// IgnoreIDToFilePaths is the map of ID to absolute file path to ignore.
// IDs expected to be all upper-case.
// File paths expected to be absolute paths.
IgnoreIDToFilePaths map[string][]string
}
// GenConfig is the gen config.
type GenConfig struct {
// The go plugin options.
GoPluginOptions GenGoPluginOptions
// The plugins.
// These will be sorted by name if returned from this package.
Plugins []GenPlugin
}
// GenGoPluginOptions are options for go plugins.
//
// This will be used for plugin types go, gogo, gogrpc, gogogrpc.
type GenGoPluginOptions struct {
// The base import path. This should be the go path of the config file.
// This is required for go plugins.
ImportPath string
// ExtraModifiers to include with Mfile=package.
ExtraModifiers map[string]string
}
// GenPlugin is a plugin to use.
type GenPlugin struct {
// The name of the plugin. For example, if you want to use
// protoc-gen-gogoslick, the name is "gogoslick".
Name string
// The path to the executable. For example, if the name is "grpc-cpp"
// but the path to the executable "protoc-gen-grpc-cpp" is "/usr/local/bin/grpc_cpp_plugin",
// then this will be "/usr/local/bin/grpc_cpp_plugin".
Path string
// The type, if any. This will be GenPluginTypeNone if
// there is no specific type.
Type GenPluginType
// Extra flags to pass.
// If there is an associated type, some flags may be generated,
// for example plugins=grpc or Mfile=package modifiers.
Flags string
// The path to output to.
// Must be relative in a config file.
OutputPath OutputPath
}
// OutputPath is an output path.
//
// We need the relative path for go package references for generation.
// TODO: we might want all paths to have the given path and absolute path,
// see if we need this.
type OutputPath struct {
// Must be relative.
RelPath string
AbsPath string
}
// ExternalConfig is the external representation of Config.
//
// It is meant to be set by a YAML or JSON config file, or flags.
type ExternalConfig struct {
Excludes []string `json:"excludes,omitempty" yaml:"excludes,omitempty"`
Protoc struct {
AllowUnusedImports bool `json:"allow_unused_imports,omitempty" yaml:"allow_unused_imports,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
Includes []string `json:"includes,omitempty" yaml:"includes,omitempty"`
} `json:"protoc,omitempty" yaml:"protoc,omitempty"`
Create struct {
Packages []struct {
Directory string `json:"directory,omitempty" yaml:"directory,omitempty"`
Name string `json:"name,omitempty" yaml:"name,omitempty"`
} `json:"packages,omitempty" yaml:"packages,omitempty"`
} `json:"create,omitempty" yaml:"create,omitempty"`
Lint struct {
Ignores []struct {
ID string `json:"id,omitempty" yaml:"id,omitempty"`
Files []string `json:"files,omitempty" yaml:"files,omitempty"`
}
Rules struct {
NoDefault bool `json:"no_default,omitempty" yaml:"no_default,omitempty"`
Add []string `json:"add" yaml:"add"`
Remove []string `json:"remove" yaml:"remove"`
}
} `json:"lint,omitempty" yaml:"lint,omitempty"`
Gen struct {
GoOptions struct {
ImportPath string `json:"import_path,omitempty" yaml:"import_path,omitempty"`
ExtraModifiers map[string]string `json:"extra_modifiers,omitempty" yaml:"extra_modifiers,omitempty"`
| String | identifier_name
lib.rs | //!- https://datascience.stackexchange.com/questions/20139/gradients-for-bias-terms-in-backpropagation
//!- https://cs231n.github.io/optimization-2/
//!- https://cs231n.github.io/neural-networks-case-study/#grad
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://github.com/karpathy/micrograd
//!- https://rufflewind.com/2016-12-30/reverse-mode-automatic-differentiation
//! - https://github.com/ibab/rust-ad
//! - https://github.com/Rufflewind/revad/blob/eb3978b3ccdfa8189f3ff59d1ecee71f51c33fd7/revad.py
//! - https://github.com/srirambandi/ai
//!- https://discuss.pytorch.org/t/is-pytorch-autograd-tape-based/13992/3
//!- https://www.reddit.com/r/MachineLearning/comments/8ep130/d_how_does_autograd_work/
//!- https://github.com/mattjj/autodidact
//!- https://github.com/karpathy/recurrentjs
//!- https://github.com/karpathy/randomfun
//!- https://medium.com/@ralphmao95/simple-autograd-implementation-understand-automatic-differentiation-hand-by-hand-9e86f6d703ab
//!- https://evcu.github.io/ml/autograd/
//!- https://blog.paperspace.com/pytorch-101-understanding-graphs-and-automatic-differentiation/
//!- https://github.com/maciejkula/wyrm
//!- https://medium.com/@maciejkula/building-an-autodifferentiation-library-9ccf32c7a658
//!- https://github.com/evcu/numpy_autograd/blob/master/my_autograd.py#L147
//!- https://github.com/evcu/numpy_autograd/blob/master/Autograd.ipynb
//!- https://cs231n.github.io/optimization-2/
//!- https://github.com/explosion/thinc
//!- https://github.com/joelgrus/joelnet
//!- https://github.com/QuantStack/xtensor
//!- https://github.com/ThinkingTransistor/Sigma
//!- https://github.com/mratsim/Arraymancer
//!- https://github.com/siekmanj/sieknet
//!- https://github.com/siekmanj/sieknet_2.0
//!- https://github.com/Daniel-Liu-c0deb0t/Java-Machine-Learning
//!- https://github.com/karpathy/micrograd
//!
//!This README is based on:
//!
//!- https://github.com/bilal2vec/pytorch_zoo
//!- https://github.com/bilal2vec/grover
//!- https://github.com/rish-16/gpt2client
//!- https://github.com/mxbi/mlcrate
//!- https://github.com/athityakumar/colorls
//!- https://github.com/amitmerchant1990/electron-markdownify
//!
//!I used carbon.now.sh with the "Shades of Purple" theme for the screenshot at the beginning of this README
//!
//!This project contains ~4300 lines of code
pub mod errors;
mod ops;
pub mod tensor;
use errors::TensorError;
use tensor::Tensor;
pub fn add<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs + rhs
}
pub fn sub<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs - rhs
}
pub fn mul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs * rhs
}
pub fn div<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs / rhs
}
pub fn pow<'a>(lhs: &'a Tensor, exp: f32) -> Result<Tensor<'a>, TensorError> {
lhs.pow(exp)
}
pub fn sqrt<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sqrt()
}
pub fn exp<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.exp()
}
pub fn log10<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log10()
}
pub fn log<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log()
}
pub fn abs<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.abs()
}
pub fn sin<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sin()
}
pub fn cos<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.cos()
}
pub fn tan<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.tan()
}
pub fn sum<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.sum(dim)
}
pub fn mean<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.mean(dim)
}
pub fn max<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.max(dim)
}
pub fn min<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.min(dim)
}
pub fn argmax<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmax(dim)
}
pub fn argmin<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmin(dim)
}
pub fn matmul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.matmul(rhs)
}
pub fn concat<'a>(lhs: &'a Tensor, rhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.concat(&rhs, dim)
}
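// A minimal usage sketch of the wrappers above, following the Tensor::new
// pattern from the tests below (matmul shape rules are assumed to be the
// standard (m,k) x (k,n) -> (m,n)):
//
//     let a = Tensor::new(vec![1.0, 2.0, 3.0, 4.0], &[2, 2]).unwrap();
//     let b = Tensor::new(vec![5.0, 6.0, 7.0, 8.0], &[2, 2]).unwrap();
//     let c = add(&a, &b);             // elementwise; c.shape == vec![2, 2]
//     let d = matmul(&a, &b).unwrap(); // (2,2) x (2,2) -> (2,2)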
#[cfg(test)]
mod tests {
use super::tensor::*;
use super::*;
#[test]
fn test_add() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = add(&a, &b);
assert!((c.data == vec![4.0, 6.0]) && (c.shape == vec![2]))
}
#[test]
fn test_subtract() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sub(&a, &b);
assert!((c.data == vec![0.0, 0.0]) && (c.shape == vec![2]))
}
#[test]
fn test_mul() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mul(&a, &b);
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_div() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = div(&a, &b);
assert!((c.data == vec![1.0, 1.0]) && (c.shape == vec![2]))
}
#[test]
fn test_pow() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = pow(&a, 2.0).unwrap();
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn | test_sum | identifier_name |
|
lib.rs | the MIT License - see the license file for details
//!
//!# Acknowledgements
//!
//!The fast.ai deep learning from the foundations course (https://course.fast.ai/part2) teaches a lot about how to make your own deep learning library
//!
//!Some of the resources that I found useful when working on this library include:
//!
//!- http://blog.ezyang.com/2019/05/pytorch-internals/
//!- https://pytorch.org/tutorials/beginner/nn_tutorial.html
//!- https://eisenjulian.github.io/deep-learning-in-100-lines/
//!- https://medium.com/@florian.caesar/how-to-create-a-machine-learning-framework-from-scratch-in-491-steps-93428369a4eb
//!- https://medium.com/@johan.mabille/how-we-wrote-xtensor-1-n-n-dimensional-containers-f79f9f4966a7
//!- https://erikpartridge.com/2019-03/rust-ml-simd-blas-lapack
//!- https://medium.com/@GolDDranks/things-rust-doesnt-let-you-do-draft-f596a3c740a5
//!- https://datascience.stackexchange.com/questions/20139/gradients-for-bias-terms-in-backpropagation
//!- https://cs231n.github.io/optimization-2/
//!- https://cs231n.github.io/neural-networks-case-study/#grad
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://github.com/karpathy/micrograd
//!- https://rufflewind.com/2016-12-30/reverse-mode-automatic-differentiation
//! - https://github.com/ibab/rust-ad
//! - https://github.com/Rufflewind/revad/blob/eb3978b3ccdfa8189f3ff59d1ecee71f51c33fd7/revad.py
//! - https://github.com/srirambandi/ai
//!- https://discuss.pytorch.org/t/is-pytorch-autograd-tape-based/13992/3
//!- https://www.reddit.com/r/MachineLearning/comments/8ep130/d_how_does_autograd_work/
//!- https://github.com/mattjj/autodidact
//!- https://github.com/karpathy/recurrentjs
//!- https://github.com/karpathy/randomfun
//!- https://medium.com/@ralphmao95/simple-autograd-implementation-understand-automatic-differentiation-hand-by-hand-9e86f6d703ab
//!- https://evcu.github.io/ml/autograd/
//!- https://blog.paperspace.com/pytorch-101-understanding-graphs-and-automatic-differentiation/
//!- https://github.com/maciejkula/wyrm
//!- https://medium.com/@maciejkula/building-an-autodifferentiation-library-9ccf32c7a658
//!- https://github.com/evcu/numpy_autograd/blob/master/my_autograd.py#L147
//!- https://github.com/evcu/numpy_autograd/blob/master/Autograd.ipynb
//!- https://cs231n.github.io/optimization-2/
//!- https://github.com/explosion/thinc
//!- https://github.com/joelgrus/joelnet
//!- https://github.com/QuantStack/xtensor
//!- https://github.com/ThinkingTransistor/Sigma
//!- https://github.com/mratsim/Arraymancer
//!- https://github.com/siekmanj/sieknet
//!- https://github.com/siekmanj/sieknet_2.0
//!- https://github.com/Daniel-Liu-c0deb0t/Java-Machine-Learning
//!- https://github.com/karpathy/micrograd
//!
//!This README is based on:
//!
//!- https://github.com/bilal2vec/pytorch_zoo
//!- https://github.com/bilal2vec/grover
//!- https://github.com/rish-16/gpt2client
//!- https://github.com/mxbi/mlcrate
//!- https://github.com/athityakumar/colorls
//!- https://github.com/amitmerchant1990/electron-markdownify
//!
//!I used carbon.now.sh with the "Shades of Purple" theme for the screenshot at the beginning of this README
//!
//!This project contains ~4300 lines of code
pub mod errors;
mod ops;
pub mod tensor;
use errors::TensorError;
use tensor::Tensor;
pub fn add<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs + rhs
}
pub fn sub<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs - rhs
}
pub fn mul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs * rhs
}
pub fn div<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs / rhs
}
pub fn pow<'a>(lhs: &'a Tensor, exp: f32) -> Result<Tensor<'a>, TensorError> {
lhs.pow(exp)
}
pub fn sqrt<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sqrt()
}
pub fn exp<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.exp()
}
pub fn log10<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log10()
}
pub fn log<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log()
}
pub fn abs<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.abs()
}
pub fn sin<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sin()
}
pub fn cos<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.cos()
}
pub fn tan<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.tan()
}
pub fn sum<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.sum(dim)
}
pub fn mean<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.mean(dim)
}
pub fn max<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.max(dim)
}
pub fn min<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.min(dim)
}
pub fn argmax<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmax(dim)
}
pub fn argmin<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmin(dim) |
pub fn matmul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.matmul(rhs)
}
pub fn concat<'a>(lhs: &'a Tensor, rhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.concat(&rhs, dim)
}
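// A small sketch of the reduction helpers, extrapolated from the test_sum
// pattern in the tests below (dim 0 reduces the only axis of a 1-D tensor):
//
//     let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
//     let s = sum(&a, 0).unwrap();    // data [5.0], shape [1]
//     let m = mean(&a, 0).unwrap();   // presumably data [2.5], shape [1]
//     let i = argmax(&a, 0).unwrap(); // presumably data [1.0] (index of 3.0)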
#[cfg(test)]
mod tests {
use super::tensor::*;
use super::*;
#[test]
fn test_add() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = add(&a, &b);
assert!((c.data == vec![4.0, 6.0]) && (c.shape == vec![2]))
}
#[test]
fn test_subtract() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sub(&a, &b);
assert!((c.data == vec![0.0, 0.0]) && (c.shape == vec![2]))
}
lib.rs | - https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
//!- https://stackoverflow.com/questions/38082835/backpropagation-in-gradient-descent-for-neural-networks-vs-linear-regression
//!- https://github.com/karpathy/micrograd
//!- https://rufflewind.com/2016-12-30/reverse-mode-automatic-differentiation
//! - https://github.com/ibab/rust-ad
//! - https://github.com/Rufflewind/revad/blob/eb3978b3ccdfa8189f3ff59d1ecee71f51c33fd7/revad.py
//! - https://github.com/srirambandi/ai
//!- https://discuss.pytorch.org/t/is-pytorch-autograd-tape-based/13992/3
//!- https://www.reddit.com/r/MachineLearning/comments/8ep130/d_how_does_autograd_work/
//!- https://github.com/mattjj/autodidact
//!- https://github.com/karpathy/recurrentjs
//!- https://github.com/karpathy/randomfun
//!- https://medium.com/@ralphmao95/simple-autograd-implementation-understand-automatic-differentiation-hand-by-hand-9e86f6d703ab
//!- https://evcu.github.io/ml/autograd/
//!- https://blog.paperspace.com/pytorch-101-understanding-graphs-and-automatic-differentiation/
//!- https://github.com/maciejkula/wyrm
//!- https://medium.com/@maciejkula/building-an-autodifferentiation-library-9ccf32c7a658
//!- https://github.com/evcu/numpy_autograd/blob/master/my_autograd.py#L147
//!- https://github.com/evcu/numpy_autograd/blob/master/Autograd.ipynb
//!- https://cs231n.github.io/optimization-2/
//!- https://github.com/explosion/thinc
//!- https://github.com/joelgrus/joelnet
//!- https://github.com/QuantStack/xtensor
//!- https://github.com/ThinkingTransistor/Sigma
//!- https://github.com/mratsim/Arraymancer
//!- https://github.com/siekmanj/sieknet
//!- https://github.com/siekmanj/sieknet_2.0
//!- https://github.com/Daniel-Liu-c0deb0t/Java-Machine-Learning
//!- https://github.com/karpathy/micrograd
//!
//!This README is based on:
//!
//!- https://github.com/bilal2vec/pytorch_zoo
//!- https://github.com/bilal2vec/grover
//!- https://github.com/rish-16/gpt2client
//!- https://github.com/mxbi/mlcrate
//!- https://github.com/athityakumar/colorls
//!- https://github.com/amitmerchant1990/electron-markdownify
//!
//!I used carbon.now.sh with the "Shades of Purple" theme for the screenshot at the beginning of this README
//!
//!This project contains ~4300 lines of code
pub mod errors;
mod ops;
pub mod tensor;
use errors::TensorError;
use tensor::Tensor;
pub fn add<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs + rhs
}
pub fn sub<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs - rhs
}
pub fn mul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs * rhs
}
pub fn div<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Tensor<'a> {
lhs / rhs
}
pub fn pow<'a>(lhs: &'a Tensor, exp: f32) -> Result<Tensor<'a>, TensorError> {
lhs.pow(exp)
}
pub fn sqrt<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sqrt()
}
pub fn exp<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.exp()
}
pub fn log10<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log10()
}
pub fn log<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.log()
}
pub fn abs<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.abs()
}
pub fn sin<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.sin()
}
pub fn cos<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.cos()
}
pub fn tan<'a>(lhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.tan()
}
pub fn sum<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.sum(dim)
}
pub fn mean<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.mean(dim)
}
pub fn max<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.max(dim)
}
pub fn min<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.min(dim)
}
pub fn argmax<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmax(dim)
}
pub fn argmin<'a>(lhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.argmin(dim)
}
pub fn matmul<'a>(lhs: &'a Tensor, rhs: &'a Tensor) -> Result<Tensor<'a>, TensorError> {
lhs.matmul(rhs)
}
pub fn concat<'a>(lhs: &'a Tensor, rhs: &'a Tensor, dim: isize) -> Result<Tensor<'a>, TensorError> {
lhs.concat(&rhs, dim)
}
#[cfg(test)]
mod tests {
use super::tensor::*;
use super::*;
#[test]
fn test_add() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = add(&a, &b);
assert!((c.data == vec![4.0, 6.0]) && (c.shape == vec![2]))
}
#[test]
fn test_subtract() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sub(&a, &b);
assert!((c.data == vec![0.0, 0.0]) && (c.shape == vec![2]))
}
#[test]
fn test_mul() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = mul(&a, &b);
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_div() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let b = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = div(&a, &b);
assert!((c.data == vec![1.0, 1.0]) && (c.shape == vec![2]))
}
#[test]
fn test_pow() {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = pow(&a, 2.0).unwrap();
assert!((c.data == vec![4.0, 9.0]) && (c.shape == vec![2]))
}
#[test]
fn test_sum() | {
let a = Tensor::new(vec![2.0, 3.0], &[2]).unwrap();
let c = sum(&a, 0).unwrap();
assert!((c.data == vec![5.0]) && (c.shape == vec![1]))
} | identifier_body |
|
lib.rs | Array2<T>
where
T: Copy,
{
fn original(&self) -> Array2<T> {
self.clone()
}
fn rot90_clockwise(&self) -> Array2<T> {
let mut arr = self.clone();
arr.swap_axes(0, 1);
arr.flip_horizontal()
}
fn rot180_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..;-1]));
arr
}
fn rot270_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr.swap_axes(0, 1);
arr
}
fn flip_vertical(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..]));
arr
}
fn flip_horizontal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr
}
fn flip_main_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.t());
arr
}
fn flip_sub_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr: Array2<T> = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.rot270_clockwise().t());
arr.rot90_clockwise()
}
}
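// The eight forms above are the symmetries of a square (the dihedral group
// D4): the identity, three rotations, and four reflections. A quick sketch of
// identities the tests below also exercise (method names are the ones defined
// in this impl):
//
//     let m = ndarray::arr2(&[[1., 2.], [3., 4.]]);
//     assert_eq!(m.rot180_clockwise(), m.rot90_clockwise().rot90_clockwise());
//     assert_eq!(m.flip_main_diagonal(), m.t().to_owned()); // plain transpose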
#[allow(unused)]
#[derive(Eq)]
pub struct Tile {
tile_id: usize,
sub_image: Array2<u8>,
borders: Vec<(u32, u32, u32, u32)>,
}
impl PartialEq for Tile {
fn eq(&self, other: &Self) -> bool {
self.tile_id == other.tile_id
}
}
use std::fmt::Debug;
impl Debug for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{}]", self.tile_id)?;
Ok(())
}
}
impl Tile {
pub fn new(data: &str) -> Self {
let lines = data
.split('\n')
.map(|s| s.trim_end().to_string())
.collect::<Vec<_>>();
let shape = lines[1].len() - 2;
let tile_id = parse_tile::parse_tile_id(&lines[0]).unwrap();
let (top, top_rev) = parse_tile::parse_border(&lines[1]).unwrap();
let left_col = lines
.iter()
.skip(1)
.map(|s| s.chars().next().unwrap())
.collect::<String>();
let (left, left_rev) = parse_tile::parse_border(&left_col).unwrap();
let right_col = lines
.iter()
.skip(1)
.map(|s| s.chars().last().unwrap())
.collect::<String>();
let (right, right_rev) = parse_tile::parse_border(&right_col).unwrap();
let (bottom, bottom_rev) = parse_tile::parse_border(&lines[lines.len() - 1]).unwrap();
let mut sub_image = unsafe { Array2::<u8>::uninitialized((shape, shape)) };
for (i, row) in lines.iter().enumerate().skip(2).take(shape) {
let row_pixels = parse_tile::parse_sub_image(&row[1..row.len() - 1]).unwrap();
sub_image.row_mut(i - 2).assign(&row_pixels);
}
Self {
tile_id,
sub_image,
borders: vec![
(top, right, bottom, left), // original sub image
(left_rev, top, right_rev, bottom), // rotate 90 degree clockwise
(bottom_rev, left_rev, top_rev, right_rev), // rotate 180 degree clockwise
(right, bottom_rev, left, top_rev), // rotate 270 degree clockwise
(bottom, right_rev, top, left_rev), // flip vertical
(top_rev, left, bottom_rev, right), // flip horizontal
(left, bottom, right, top), // flip along main diagonal
(right_rev, top_rev, left_rev, bottom_rev), // flip along sub diagonal
],
}
}
pub fn get_sub_image(&self, idx: usize) -> Array2<u8> {
match idx {
0 => self.sub_image.original(),
1 => self.sub_image.rot90_clockwise(),
2 => self.sub_image.rot180_clockwise(),
3 => self.sub_image.rot270_clockwise(),
4 => self.sub_image.flip_vertical(),
5 => self.sub_image.flip_horizontal(),
6 => self.sub_image.flip_main_diagonal(),
7 => self.sub_image.flip_sub_diagonal(),
_ => unreachable!("not a valid form index: {}", idx),
}
}
}
pub struct BigImage {
tiles: Vec<Tile>,
shape: usize,
}
impl BigImage {
pub fn new(tiles: Vec<Tile>) -> Self {
let shape = (tiles.len() as f64).sqrt() as usize;
Self { shape, tiles }
}
pub fn | <'a>(
&'a self,
row: usize,
col: usize,
prev_images: &[(&'a Tile, usize)],
) -> Vec<(&'a Tile, usize)> {
let mut result: Vec<(&Tile, usize)> = vec![];
result.extend_from_slice(prev_images);
for tile in self.tiles.iter() {
if result.iter().any(|(t, _)| t == &tile) {
continue;
}
result.push((tile, 0));
let upper_tile = if row > 0 {
Some(result[(row - 1) * self.shape + col])
} else {
None
};
let left_tile = if col > 0 {
Some(result[row * self.shape + col - 1])
} else {
None
};
for idx in 0..8 {
result.last_mut().unwrap().1 = idx;
if (row == 0
|| tile.borders[idx].0
== upper_tile.unwrap().0.borders[upper_tile.unwrap().1].2)
&& (col == 0
|| tile.borders[idx].3
== left_tile.unwrap().0.borders[left_tile.unwrap().1].1)
{
if row == self.shape - 1 && col == self.shape - 1 {
return result;
}
let (new_row, new_col) = if col + 1 >= self.shape {
(row + 1, 0)
} else {
(row, col + 1)
};
let ret = self.fits(new_row, new_col, &result);
if !ret.is_empty() {
return ret;
}
}
}
result.pop();
}
vec![]
}
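// fits is a depth-first backtracking search over tile placements: for grid
// cell (row, col) it tries every unused tile in each of its 8 border
// orientations, keeping an orientation only if the top border matches the
// tile above and the left border matches the tile to the left, and pops the
// tentative placement on a dead end. A sketch of the top-level call, mirroring
// the tests below (the input path is hypothetical):
//
//     let image = BigImage::new(read_input("input.txt"));
//     let placement = image.fits(0, 0, &vec![]);
//     // empty vec on failure; otherwise one (tile, orientation) pair per
//     // grid cell in row-major order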
pub fn splice_result(&self, fit_result: &[(&Tile, usize)]) -> Array2<u8> {
let pixels = fit_result[0].0.sub_image.shape()[0];
let mut big_image = Array2::<u8>::zeros((0, self.shape * pixels));
for row in 0..self.shape {
let mut row_image = Array2::<u8>::zeros((pixels, 0));
for col in 0..self.shape {
let result = fit_result[row * self.shape + col];
row_image = concatenate![Axis(1), row_image, result.0.get_sub_image(result.1)];
}
big_image = concatenate![Axis(0), big_image, row_image];
}
big_image
}
}
pub fn part1_solution(fit_result: &[(&Tile, usize)]) -> usize {
let shape = (fit_result.len() as f64).sqrt() as usize;
let corner_idx = &[0, shape - 1, shape * (shape - 1), shape * shape - 1];
fit_result
.iter()
.enumerate()
.filter(|(idx, _)| corner_idx.contains(idx))
.map(|(_, (t, _))| t.tile_id)
.product()
}
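// Worked example of the corner indexing above: for a 3x3 placement (shape=3),
// corner_idx is [0, 2, 6, 8] — the four corners of the row-major grid — and
// part 1 multiplies those four tiles' IDs together.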
lazy_static! {
static ref MONSTER: Array2<u8> = unsafe {
Array2::from_shape_vec_unchecked(
(3, 20),
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0,
1, 0, 0, 0,
lib.rs | (left_rev, top, right_rev, bottom), // rotate 90 degree clockwise
(bottom_rev, left_rev, top_rev, right_rev), // rotate 180 degree clockwise
(right, bottom_rev, left, top_rev), // rotate 270 degree clockwise
(bottom, right_rev, top, left_rev), // flip vertical
(top_rev, left, bottom_rev, right), // flip horizontal
(left, bottom, right, top), // flip along main diagonal
(right_rev, top_rev, left_rev, bottom_rev), // flip along sub diagonal
],
}
}
pub fn get_sub_image(&self, idx: usize) -> Array2<u8> {
match idx {
0 => self.sub_image.original(),
1 => self.sub_image.rot90_clockwise(),
2 => self.sub_image.rot180_clockwise(),
3 => self.sub_image.rot270_clockwise(),
4 => self.sub_image.flip_vertical(),
5 => self.sub_image.flip_horizontal(),
6 => self.sub_image.flip_main_diagonal(),
7 => self.sub_image.flip_sub_diagonal(),
_ => unreachable!("not a valid form index: {}", idx),
}
}
}
pub struct BigImage {
tiles: Vec<Tile>,
shape: usize,
}
impl BigImage {
pub fn new(tiles: Vec<Tile>) -> Self {
let shape = (tiles.len() as f64).sqrt() as usize;
Self { shape, tiles }
}
pub fn fits<'a>(
&'a self,
row: usize,
col: usize,
prev_images: &[(&'a Tile, usize)],
) -> Vec<(&'a Tile, usize)> {
let mut result: Vec<(&Tile, usize)> = vec![];
result.extend_from_slice(prev_images);
for tile in self.tiles.iter() {
if result.iter().any(|(t, _)| t == &tile) {
continue;
}
result.push((tile, 0));
let upper_tile = if row > 0 {
Some(result[(row - 1) * self.shape + col])
} else {
None
};
let left_tile = if col > 0 {
Some(result[row * self.shape + col - 1])
} else {
None
};
for idx in 0..8 {
result.last_mut().unwrap().1 = idx;
if (row == 0
|| tile.borders[idx].0
== upper_tile.unwrap().0.borders[upper_tile.unwrap().1].2)
&& (col == 0
|| tile.borders[idx].3
== left_tile.unwrap().0.borders[left_tile.unwrap().1].1)
{
if row == self.shape - 1 && col == self.shape - 1 {
return result;
}
let (new_row, new_col) = if col + 1 >= self.shape {
(row + 1, 0)
} else {
(row, col + 1)
};
let ret = self.fits(new_row, new_col, &result);
if !ret.is_empty() {
return ret;
}
}
}
result.pop();
}
vec![]
}
pub fn splice_result(&self, fit_result: &[(&Tile, usize)]) -> Array2<u8> {
let pixels = fit_result[0].0.sub_image.shape()[0];
let mut big_image = Array2::<u8>::zeros((0, self.shape * pixels));
for row in 0..self.shape {
let mut row_image = Array2::<u8>::zeros((pixels, 0));
for col in 0..self.shape {
let result = fit_result[row * self.shape + col];
row_image = concatenate![Axis(1), row_image, result.0.get_sub_image(result.1)];
}
big_image = concatenate![Axis(0), big_image, row_image];
}
big_image
}
}
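// splice_result stitches the solved placement back into one picture: each
// tile's borderless sub-image (tile side minus 2, per Tile::new) is joined
// left-to-right into a row strip with concatenate![Axis(1), ..], and the
// strips are stacked with Axis(0). E.g. a 12x12 placement of 8x8 interiors
// would yield a 96x96 image (the grid size here is illustrative).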
pub fn part1_solution(fit_result: &[(&Tile, usize)]) -> usize {
let shape = (fit_result.len() as f64).sqrt() as usize;
let corner_idx = &[0, shape - 1, shape * (shape - 1), shape * shape - 1];
fit_result
.iter()
.enumerate()
.filter(|(idx, _)| corner_idx.contains(idx))
.map(|(_, (t, _))| t.tile_id)
.product()
}
lazy_static! {
static ref MONSTER: Array2<u8> = unsafe {
Array2::from_shape_vec_unchecked(
(3, 20),
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0,
1, 0, 0, 0,
],
)
};
}
fn find_all_monsters(image: &Array2<u8>) -> Vec<(usize, usize)> {
let shape = image.shape()[0];
let mut found = vec![];
for row in 0..=shape - MONSTER.shape()[0] {
for col in 0..=shape - MONSTER.shape()[1] {
if &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) & &MONSTER.slice(s![.., ..])
== MONSTER.slice(s![.., ..])
{
found.push((row, col));
}
}
}
found
}
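// The match test above is a bitwise mask check over a sliding window: with
// 0/1 pixels, `window & MONSTER == MONSTER` holds exactly when every pixel
// that is 1 in MONSTER is also 1 in the window (extra 1s in the window are
// allowed).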
pub fn part2_solution(big_image: &BigImage, fit_result: &[(&Tile, usize)]) -> usize {
let mut image = big_image.splice_result(fit_result);
let monsters_pos = find_all_monsters(&image);
for (row, col) in monsters_pos {
let region = &image.slice(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
]) - &MONSTER.slice(s![.., ..]);
image
.slice_mut(s![
row..row + MONSTER.shape()[0],
col..col + MONSTER.shape()[1]
])
.assign(&(region));
}
image.iter().map(|x| *x as usize).sum::<usize>()
}
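// A small sanity check that explains the arithmetic above: MONSTER has 15 set
// pixels, so (assuming monsters do not overlap, which holds for the puzzle
// input) the roughness is the total count of 1s minus 15 per monster found:
//
//     assert_eq!(MONSTER.iter().filter(|&&p| p == 1).count(), 15);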
pub fn read_input(input_file: &str) -> Vec<Tile> {
std::fs::read_to_string(input_file)
.unwrap()
.split("\n\n")
.filter(|&b| !b.trim().is_empty())
.map(|b| Tile::new(b))
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::Array;
#[test]
fn test_matrix_transforms() {
let m = Array::range(1., 5., 1.).into_shape((2, 2)).unwrap();
assert_eq!(m.original(), ndarray::arr2(&[[1., 2.], [3., 4.]]));
assert_eq!(m.rot90_clockwise(), ndarray::arr2(&[[3., 1.], [4., 2.]]));
assert_eq!(m.rot180_clockwise(), ndarray::arr2(&[[4., 3.], [2., 1.]]));
assert_eq!(m.rot270_clockwise(), ndarray::arr2(&[[2., 4.], [1., 3.]]));
assert_eq!(m.flip_vertical(), ndarray::arr2(&[[3., 4.], [1., 2.]]));
assert_eq!(m.flip_horizontal(), ndarray::arr2(&[[2., 1.], [4., 3.]]));
assert_eq!(m.flip_main_diagonal(), ndarray::arr2(&[[1., 3.], [2., 4.]]));
assert_eq!(m.flip_sub_diagonal(), ndarray::arr2(&[[4., 2.], [3., 1.]]));
}
#[test]
fn test_part1() {
let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part1_solution(&result), 20899048083289);
}
#[test]
fn test_part2() { | let testcase = read_input("../testcase1.txt");
let test_image = BigImage::new(testcase);
let result = vec![];
let result = test_image.fits(0, 0, &result);
assert_eq!(part2_solution(&test_image, &result), 273); | random_line_split |
|
lib.rs | Array2<T>
where
T: Copy,
{
fn original(&self) -> Array2<T> {
self.clone()
}
fn rot90_clockwise(&self) -> Array2<T> {
let mut arr = self.clone();
arr.swap_axes(0, 1);
arr.flip_horizontal()
}
fn rot180_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..;-1]));
arr
}
fn rot270_clockwise(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr.swap_axes(0, 1);
arr
}
fn flip_vertical(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![..;-1, ..]));
arr
}
fn flip_horizontal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.slice(s![.., ..;-1]));
arr
}
fn flip_main_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.t());
arr
}
fn flip_sub_diagonal(&self) -> Array2<T> {
let shape = self.shape()[0];
let mut arr: Array2<T> = unsafe { Array2::uninitialized((shape, shape)) };
arr.assign(&self.rot270_clockwise().t());
arr.rot90_clockwise()
}
}
#[allow(unused)]
#[derive(Eq)]
pub struct Tile {
tile_id: usize,
sub_image: Array2<u8>,
borders: Vec<(u32, u32, u32, u32)>,
}
impl PartialEq for Tile {
fn eq(&self, other: &Self) -> bool {
self.tile_id == other.tile_id
}
}
use std::fmt::Debug;
impl Debug for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{}]", self.tile_id)?;
Ok(())
}
}
impl Tile {
pub fn new(data: &str) -> Self | let (bottom, bottom_rev) = parse_tile::parse_border(&lines[lines.len() - 1]).unwrap();
let mut sub_image = unsafe { Array2::<u8>::uninitialized((shape, shape)) };
for (i, row) in lines.iter().enumerate().skip(2).take(shape) {
let row_pixels = parse_tile::parse_sub_image(&row[1..row.len() - 1]).unwrap();
sub_image.row_mut(i - 2).assign(&row_pixels);
}
Self {
tile_id,
sub_image,
borders: vec![
(top, right, bottom, left), // original sub image
(left_rev, top, right_rev, bottom), // rotate 90 degree clockwise
(bottom_rev, left_rev, top_rev, right_rev), // rotate 180 degree clockwise
(right, bottom_rev, left, top_rev), // rotate 270 degree clockwise
(bottom, right_rev, top, left_rev), // flip vertical
(top_rev, left, bottom_rev, right), // flip horizontal
(left, bottom, right, top), // flip along main diagonal
(right_rev, top_rev, left_rev, bottom_rev), // flip along sub diagonal
],
}
}
pub fn get_sub_image(&self, idx: usize) -> Array2<u8> {
match idx {
0 => self.sub_image.original(),
1 => self.sub_image.rot90_clockwise(),
2 => self.sub_image.rot180_clockwise(),
3 => self.sub_image.rot270_clockwise(),
4 => self.sub_image.flip_vertical(),
5 => self.sub_image.flip_horizontal(),
6 => self.sub_image.flip_main_diagonal(),
7 => self.sub_image.flip_sub_diagonal(),
_ => unreachable!("not a valid form index: {}", idx),
}
}
}
pub struct BigImage {
tiles: Vec<Tile>,
shape: usize,
}
impl BigImage {
pub fn new(tiles: Vec<Tile>) -> Self {
let shape = (tiles.len() as f64).sqrt() as usize;
Self { shape, tiles }
}
pub fn fits<'a>(
&'a self,
row: usize,
col: usize,
prev_images: &[(&'a Tile, usize)],
) -> Vec<(&'a Tile, usize)> {
let mut result: Vec<(&Tile, usize)> = vec![];
result.extend_from_slice(prev_images);
for tile in self.tiles.iter() {
if result.iter().any(|(t, _)| t == &tile) {
continue;
}
result.push((tile, 0));
let upper_tile = if row > 0 {
Some(result[(row - 1) * self.shape + col])
} else {
None
};
let left_tile = if col > 0 {
Some(result[row * self.shape + col - 1])
} else {
None
};
for idx in 0..8 {
result.last_mut().unwrap().1 = idx;
if (row == 0
|| tile.borders[idx].0
== upper_tile.unwrap().0.borders[upper_tile.unwrap().1].2)
&& (col == 0
|| tile.borders[idx].3
== left_tile.unwrap().0.borders[left_tile.unwrap().1].1)
{
if row == self.shape - 1 && col == self.shape - 1 {
return result;
}
let (new_row, new_col) = if col + 1 >= self.shape {
(row + 1, 0)
} else {
(row, col + 1)
};
let ret = self.fits(new_row, new_col, &result);
if !ret.is_empty() {
return ret;
}
}
}
result.pop();
}
vec![]
}
pub fn splice_result(&self, fit_result: &[(&Tile, usize)]) -> Array2<u8> {
let pixels = fit_result[0].0.sub_image.shape()[0];
let mut big_image = Array2::<u8>::zeros((0, self.shape * pixels));
for row in 0..self.shape {
let mut row_image = Array2::<u8>::zeros((pixels, 0));
for col in 0..self.shape {
let result = fit_result[row * self.shape + col];
row_image = concatenate![Axis(1), row_image, result.0.get_sub_image(result.1)];
}
big_image = concatenate![Axis(0), big_image, row_image];
}
big_image
}
}
pub fn part1_solution(fit_result: &[(&Tile, usize)]) -> usize {
let shape = (fit_result.len() as f64).sqrt() as usize;
let corner_idx = &[0, shape - 1, shape * (shape - 1), shape * shape - 1];
fit_result
.iter()
.enumerate()
.filter(|(idx, _)| corner_idx.contains(idx))
.map(|(_, (t, _))| t.tile_id)
.product()
}
lazy_static! {
static ref MONSTER: Array2<u8> = unsafe {
Array2::from_shape_vec_unchecked(
(3, 20),
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1 | {
let lines = data
.split('\n')
.map(|s| s.trim_end().to_string())
.collect::<Vec<_>>();
let shape = lines[1].len() - 2;
let tile_id = parse_tile::parse_tile_id(&lines[0]).unwrap();
let (top, top_rev) = parse_tile::parse_border(&lines[1]).unwrap();
let left_col = lines
.iter()
.skip(1)
.map(|s| s.chars().next().unwrap())
.collect::<String>();
let (left, left_rev) = parse_tile::parse_border(&left_col).unwrap();
let right_col = lines
.iter()
.skip(1)
.map(|s| s.chars().last().unwrap())
.collect::<String>();
let (right, right_rev) = parse_tile::parse_border(&right_col).unwrap(); | identifier_body |
tasty_trade_importer.py | '''
og_rows = copy_rows(csvfile)
unique_days = getDays(og_rows)
# list of dicts, each dict is a csv row
all_trades = {}
formatted_rows = []
for day in unique_days:
# get account credits and debits and balance changes
money_movement = moneyMovementForDay(day, og_rows)
if money_movement:
formatted_rows.extend(getFormattedRowsForMoneyMovement(day, money_movement))
# get options/common trades
all_trades = getTradesForDay(day, og_rows, all_trades)
if all_trades:
# write row
# we only write out the p/l when the trade is over (quantity 0)
formatted_rows.extend(getFormattedRowsForTrades(day, all_trades))
# remove finished trades
all_trades = remove_completed_trades(all_trades)
# TODO: persist swing trades for next time
if all_trades:
print('*** these trades are still in progress {}'.format(all_trades))
# output csv
output_formatted_csv(formatted_rows)
print('done')
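# Pipeline recap: for each trading day we (1) collect money-movement rows,
# (2) fold every trade row into the per-symbol trades dict, (3) emit
# formatted P/L rows only for positions that finished the day flat
# (quantity back to 0), and (4) carry still-open positions forward as
# swing trades.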
def copy_rows(csvfile):
reader = csv.DictReader(csvfile)
rows = []
for row in reader:
copied = copy.deepcopy(row)
rows.append(copied)
return rows
def getDays(og_rows):
# get the unique days in the csv
unique_days = set()
for row in og_rows:
mdy = arrow.get(row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
unique_days.add(mdy)
# sort days ascending
arrow_days = [arrow.get(u_d, 'MM/DD/YYYY') for u_d in unique_days]
arrow_days.sort()
string_days = [a_d.format('MM/DD/YYYY') for a_d in arrow_days]
print('found {} trading days'.format(len(unique_days)))
return string_days
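# Sketch of getDays' behavior (dates made up): rows stamped
# "03/02/2020 9:31 AM" and "03/01/2020 1:05 PM" come back as
# ['03/01/2020', '03/02/2020'] — de-duplicated per day and sorted ascending
# by round-tripping through arrow dates.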
def sameDay(mdy, og_row):
og_mdy = arrow.get(og_row['Date/Time'], 'MM/DD/YYYY h:mm A').format('MM/DD/YYYY')
return mdy == og_mdy
def moneyMovementForDay(day, og_rows):
money_movement = []
# get each money movement event for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
if 'money movement' in row['Transaction Code'].lower():
money_movement.append(getAmount(row))
return money_movement
def getTradesForDay(day, og_rows, trades):
# TODO: support long term swing trades (need a db ;))
# trades = {}
# group by symbol (commons or options)
'''
{
symbol:{
commons: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
options: {
net_amount: float
quantity: int
amount_bought
amount_sold
}
}
}
'''
# calc all trades for this day
for row in og_rows:
# if it's the same day
if sameDay(day, row):
# calulate all trades for every symbol
if 'trade' in row['Transaction Code'].lower():
symbol = row['Symbol']
if isOption(row):
# amount with fees
netAmount = amountWithFees(row)
# save option trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['options']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['options']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['options']['quantity'] += int(row['Quantity'])
else:
trades[symbol]['options']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['options']['quantity'] -= int(row['Quantity'])
else:
# amount with fees
netAmount = amountWithFees(row)
# save stock trades
if symbol not in trades:
trades[symbol] = create_trade_dict()
trades[symbol]['commons']['net_amount'] += netAmount
# update buy and sell totals
if isPurchase(row):
trades[symbol]['commons']['amount_bought'] += math.fabs(netAmount)
# increase total holdings
trades[symbol]['commons']['quantity'] += int(row['Quantity'])
else: | trades[symbol]['commons']['amount_sold'] += math.fabs(netAmount)
# reduce total holdings
trades[symbol]['commons']['quantity'] -= int(row['Quantity'])
print('calculated all {} trades for {}'.format(len(trades.items()), day))
return trades
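# A worked sketch of the trades dict after one closed round trip (numbers are
# hypothetical, and amountWithFees is assumed to return negative amounts for
# purchases): buy 1 AAPL option for $100, sell it for $120, and
# trades['AAPL']['options'] ends the day as quantity 0, amount_bought 100.0,
# amount_sold 120.0, net_amount +20.0 — which is what gets reported as p_l.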
def is_commons_swing_trade(symbol, trade_type):
return trade_type['commons']['quantity'] != 0
def is_options_swing_trade(symbol, trade_type):
return trade_type['options']['quantity'] != 0
def get_swing_trades(swing_trades, trades):
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type) or is_options_swing_trade(symbol, trade_type):
# save most up to date trade info
swing_trades[symbol] = trade_type
return swing_trades
def remove_completed_trades(trades):
symbols_to_delete = []
for symbol, trade_type in trades.items():
if not is_commons_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
if not is_options_swing_trade(symbol, trade_type):
# trade is complete
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
if is_trade_type_empty(trade_type):
symbols_to_delete.append(symbol)
for symbol in symbols_to_delete:
trades.pop(symbol, None)
return trades
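# Example: once a symbol's options and commons are both flat (quantity 0),
# each half is reset via create_emtpy_common_or_options_dict and the symbol is
# popped entirely; a symbol still holding contracts survives into the next
# day's loop as an in-progress (swing) trade.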
def removeSwingTrades(trades):
# remove trades that are not day trades. TODO support it sometime in the future
# the quantity should be 0 if it was a day trade
for symbol, trade_type in trades.items():
if is_commons_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Commons: {}. Do it manually******'.format(symbol, trade_type['commons']['quantity']))
trades[symbol]['commons'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
if is_options_swing_trade(symbol, trade_type):
print('*****Removing swing trade for {} Options: {}. Do it manually******'.format(symbol, trade_type['options']['quantity']))
trades[symbol]['options'] = create_emtpy_common_or_options_dict()
# TODO: save trade_type object
return trades
def getFormattedRowsForMoneyMovement(day, money_movement):
formatted_rows = []
for event in money_movement:
formatted_row = {
'transaction_type': 'money_transfer',
'account': None,
'date': day,
'symbol': None,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(event, 2)),
'%': 0
}
formatted_rows.append(formatted_row)
return formatted_rows
def getFormattedRowsForTrades(day, trades):
formatted_rows = []
# output rows for each trade symbol p/l for day in trades
# for all options
for symbol, trade_type in trades.items():
# print('{} {} {}'.format(symbol, trade_type['options']['quantity'], trade_type['options']['quantity']))
if trade_type['options']['quantity'] == 0 and trade_type['options']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['options']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['options']['amount_bought'], trade_type['options']['amount_sold'])
}
formatted_rows.append(formatted_row)
# for all commons
for symbol, trade_type in trades.items():
if trade_type['commons']['quantity'] == 0 and trade_type['commons']['net_amount'] != 0:
formatted_row = {
'transaction_type': 'trade',
'account': None,
'date': day,
'symbol': symbol,
'quantity': None, # doesn't matter
'stock': None,
'option': None,
'p_l': str(round(trade_type['commons']['net_amount'], 2)),
'%': calculatePercentGain(trade_type['commons']['amount_bought'], trade_type['commons']['amount_sold'])
}
formatted_rows.append(formatted_row)
return formatted_rows
def calculatePercentGain(bought, sold):
    # completed from a duplicate copy elsewhere in this dump; the zero-check is an
    # added guard (assumption: report 0% when nothing was bought that day)
    if not bought:
        return '0.0'
    percent_gain = ((sold - bought) / bought) * 100
    return str(round(percent_gain, 2))
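# Quick sanity check (hypothetical numbers): buying for 100 and selling the
# same day for 110 reports a 10% gain.
#
#   >>> calculatePercentGain(100.0, 110.0)
#   '10.0'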
def create_trade_dict():
trades = {
        'commons': create_empty_common_or_options_dict(),
        'options': create_empty_common_or_options_dict()
}
return trades
def create_empty_common_or_options_dict():
shell = {
'net_amount': 0,
'quantity': 0,
'amount_bought': 0,
'amount_sold': 0
}
return shell
def is_trade_type_empty(trade_type):
    # a trade is finished once both the commons and options positions are flat
    return trade_type['commons']['quantity'] == 0 and trade_type['options']['quantity'] == 0
# ======== Row funcs ===========
def amountWithFees(og_row):
fees = float(og_row['Fees'])
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price) + fees
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def getAmount(og_row):
price = float(og_row['Amount'])
# if negative, it is a purchase
isPurchase = price < 0
amount = math.fabs(price)
# neg val if purchased
if isPurchase:
return amount * -1
return amount
def isPurchase(og_row):
# negative is purchase
return getAmount(og_row) < 0
def isOption(og_row):
    # an option row has a non-empty Call/Put column
    return bool(og_row['Call/Put'])
def isCallOption(og_row):
    return isOption(og_row) and 'c' in og_row['Call/Put'].lower()
def isPutOption(og_row):
    return isOption(og_row) and 'p' in og_row['Call/Put'].lower()
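# Minimal sketch of the row helpers on a fabricated TastyTrade row. The column
# names come from the code above; every value is invented for illustration.
def _demo_row_helpers():
    sample_row = {
        'Date/Time': '01/15/2021 9:45 AM',
        'Transaction Code': 'Trade',
        'Symbol': 'AAPL',
        'Call/Put': 'C',
        'Quantity': '1',
        'Amount': '-150.00',  # negative amount means a purchase
        'Fees': '1.25',
    }
    assert isOption(sample_row) and isCallOption(sample_row)
    assert isPurchase(sample_row)
    # purchase, so the fee-inclusive amount comes back negated: -151.25
    print(amountWithFees(sample_row))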
def output_formatted_csv(formatted_rows):
    print('...creating csv')
with open('formatted_tt.csv', 'w', newline='') as out_csvfile:
fieldnames = ['transaction_type','account','date','symbol','quantity','stock','option','p_l', '%']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames)
writer.writeheader()
for formatted in formatted_rows:
writer.writerow(formatted)
print('finished writing csv')
'''create a csv with the formatted rows
'''
# save deposits
# save withdrawals
# save balance adjustments = comes out of account andrew
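# Usage sketch (assumption): the per-day loop defined earlier expects an open
# handle to the TastyTrade history export; 'tastytrade_history.csv' is a
# made-up file name.
#
#   with open('tastytrade_history.csv', newline='') as csvfile:
#       # feed csvfile to the entry point that drives copy_rows()/getDays()
#       ...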
redis_performance_monitor.js

// NOTE: the dump starts this file mid-way through its metrics table. The two
// require statements below are implied by later calls (redis.createClient,
// fs.readFileSync); earlier metric definitions (e.g. the UsedMemory entry the
// code checks for) are missing from the source and are not reinvented here.
// The 'CommandsProcessed' name and its '1398' UUID prefix are inferred from
// the key/ratio fields and the 1399-1405 sequence that follows.
var fs = require('fs');
var redis = require('redis');
var metrics = {};
metrics['CommandsProcessed'] = { id : '1398:Commands/Sec:4', key : 'total_commands_processed', ratio : true };
metrics['KeyHits'] = { id : '1399:Key Hits:4', key : 'keyspace_hits', ratio : false };
metrics['KeyMisses'] = { id : '1400:Key Misses:4', key : 'keyspace_misses', ratio : false };
metrics['KeysEvicted'] = { id : '1401:Keys Evicted:4', key : 'evicted_keys', ratio : false };
metrics['KeysExpired'] = { id : '1402:Keys Expired:4', key : 'expired_keys', ratio : false };
metrics['BackgroundSaveInProgress'] = { id : '1403:Background Save:9', key : 'rdb_bgsave_in_progress', ratio : false };
metrics['ChangesSinceLastSave'] = { id : '1404:Changes since last Save:4', key : 'rdb_changes_since_last_save', ratio : false };
metrics['ConnectedSlaves'] = { id : '1405:Connected Slaves:4', key : 'connected_slaves', ratio : false };
var tempDir = '/tmp';
var sleepTime = 1000;
/**
* Entry point.
*/
(function() {
try
{
monitorInput(process.argv);
}
catch(err)
{
if(err instanceof InvalidParametersNumberError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof InvalidAuthenticationError)
{
console.log(err.message);
process.exit(err.code);
}
else if(err instanceof UnknownHostError)
{
console.log(err.message);
process.exit(err.code);
}
else
{
console.log(err.message);
process.exit(1);
}
}
}).call(this);
// ############################################################################
// PARSE INPUT
/**
* Verify number of passed arguments into the script.
*/
function monitorInput(args)
{
args = args.slice(2);
if(args.length != 4)
throw new InvalidParametersNumberError();
monitorInputProcess(args);
}
/**
* Process the passed arguments and send them to monitor execution.
* Receive: arguments to be processed
*/
function monitorInputProcess(args)
{
//<METRIC_STATE>
var metricState = args[0].replace('"', '');
var tokens = metricState.split(',');
var metricsExecution = new Array(7);
for (var i in tokens)
metricsExecution[i] = (tokens[i] === '1');
//<HOST>
var hostname = args[1];
//<PORT>
var port = args[2];
if (port.length === 0)
port = '6379';
// <USER_NAME>
// NOTE: both username and passwd read args[3] in the source; username is never
// placed on the request object, so only the password below is actually used.
var username = args[3];
username = username.length === 0 ? '' : username;
username = username === '""' ? '' : username;
if (username.length === 1 && username === '"')
	username = '';
// <PASS_WORD>
var passwd = args[3];
passwd = passwd.length === 0 ? '' : passwd;
passwd = passwd === '""' ? '' : passwd;
if (passwd.length === 1 && passwd === '"')
	passwd = '';
// Create request object to be executed.
var request = new Object();
request.checkMetrics = metricsExecution;
request.hostname = hostname;
request.port = port;
request.passwd = passwd;
// Call monitor.
monitorRedis(request);
}
// ############################################################################
// GET METRICS
/**
* Retrieve metrics information
* Receive: object request containing configuration
*
* HTTP request to retrieve data
* Receive:
* - request: object containing request configuration
*/
function monitorRedis(request)
{
var metricsObj = [];
var client = redis.createClient(request.port, request.hostname, {});
if (request.passwd !== '')
{
client.auth(request.passwd);
}
client.on('connect', function() {
processInfo(client, metricsObj, request);
});
client.on('error', function (err) {
if (err !== undefined && (err.message.indexOf('NOAUTH') != -1 || err.message.indexOf('invalid password') != -1))
{
client.quit();
errorHandler(new InvalidAuthenticationError());
}
if (err !== undefined && (err.message.indexOf('ENETUNREACH') != -1 || err.message.indexOf('ECONNREFUSED') != -1))
{
client.quit();
errorHandler(new UnknownHostError());
}
errorHandler(err);
});
}
/**
* Get metrics from INFO command.
*/
function processInfo(client, metricsObj, request)
{
client.info(function(err, info) {
	// surface INFO failures instead of parsing undefined output (the source
	// ignored err and shadowed `data`)
	if (err)
		errorHandler(err);
	var data = parseInfo(info);
var jsonString = '[';
var dateTime = new Date().toISOString();
var i = 0;
for(var key in metrics)
{
if (request.checkMetrics[i])
{
var metric = metrics[key];
var val = data[metric.key] + '';
if (key === 'BackgroundSaveInProgress')
val = val === '0' ? 1 : 0;
if (key === 'UsedMemory')
val = parseInt(val, 10) / 1024 / 1024;
jsonString += '{';
jsonString += '"variableName":"' + key + '",';
jsonString += '"metricUUID":"' + metric.id + '",';
jsonString += '"timestamp":"' + dateTime + '",';
jsonString += '"value":"' + val + '"';
jsonString += '},';
}
i++;
}
if(jsonString.length > 1)
jsonString = jsonString.slice(0, jsonString.length - 1);
jsonString += ']';
processDeltas(request, jsonString);
client.quit();
});
}
/**
* Parse INFO command output.
*/
function parseInfo(info)
{
var lines = info.split('\r\n');
var obj = {};
for (var i = 0, l = lines.length; i < l; i++)
{
var line = lines[i];
if (line && line.split)
{
line = line.split(':');
if (line.length > 1)
{
var key = line.shift();
obj[key] = line.join(':');
}
}
}
return obj;
}
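/*
 * Illustrative only: given this fragment of a reply to INFO
 *
 *   "redis_version:6.2.1\r\nkeyspace_hits:42\r\nkeyspace_misses:7"
 *
 * parseInfo returns { redis_version: '6.2.1', keyspace_hits: '42',
 * keyspace_misses: '7' }. Values stay strings, and section headers such as
 * "# Stats" contain no ':' so the line.length > 1 guard skips them.
 */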
// ############################################################################
// OUTPUT METRICS
/**
* Send metrics to console
* Receive: metrics list to output
*/
function | (metrics)
{
for (var i in metrics)
{
var out = "";
var metric = metrics[i];
out += metric.id;
out += "|";
out += metric.value;
out += "|";
console.log(out);
}
}
// ############################################################################
// RATE PROCESSING
/**
* Process performance results
* Receive:
* - request object containing configuration
 * - retrieved results
*/
function processDeltas(request, results)
{
var file = getFile(request.hostname, request.port);
var toOutput = [];
if (file)
{
var previousData = JSON.parse(file);
var newData = JSON.parse(results);
for(var i = 0; i < newData.length; i++)
{
var endMetric = newData[i];
var initMetric = null;
for(var j = 0; j < previousData.length; j++)
{
if(previousData[j].metricUUID === newData[i].metricUUID)
{
initMetric = previousData[j];
break;
}
}
if (initMetric != null)
{
var deltaValue = getDelta(initMetric, endMetric);
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = deltaValue;
toOutput.push(rateMetric);
}
else
{
var rateMetric = new Object();
rateMetric.id = endMetric.metricUUID;
rateMetric.timestamp = endMetric.timestamp;
rateMetric.value = 0;
toOutput.push(rateMetric);
}
}
setFile(request.hostname, request.port, results);
for (var m = 0; m < toOutput.length; m++)
{
for (var z = 0; z < newData.length; z++)
{
var systemMetric = metrics[newData[z].variableName];
if (systemMetric.ratio === false && newData[z].metricUUID === toOutput[m].id)
{
toOutput[m].value = newData[z].value;
break;
}
}
}
output(toOutput)
}
else
{
setFile(request.hostname, request.port, results);
// Execute again.
setTimeout(function() {
monitorInput(process.argv);
}, sleepTime);
}
}
/**
* Calculate ratio metric's value
* Receive:
* - previous value
* - current value
*/
function getDelta(initMetric, endMetric)
{
var deltaValue = 0;
var decimalPlaces = 2;
if (parseFloat(endMetric.value) < parseFloat(initMetric.value))
{
		deltaValue = parseFloat(endMetric.value).toFixed(decimalPlaces);
	}
	else
	{
		var elapsedTime = (new Date(endMetric.timestamp).getTime() - new Date(initMetric.timestamp).getTime()) / 1000;
		deltaValue = ((parseFloat(endMetric.value) - parseFloat(initMetric.value)) / elapsedTime).toFixed(decimalPlaces);
	}
	return deltaValue;
}
/**
 * Get last results if any saved
 * Receive:
 * - hostname or ip address
 * - port
 */
function getFile(hostname, port)
{
	var dirPath = __dirname + tempDir + "/";
	var filePath = dirPath + ".redis_" + hostname + "_" + port + ".dat";
	try
	{
		fs.readdirSync(dirPath);
		var file = fs.readFileSync(filePath, 'utf8');
		if (file.toString('utf8').trim())
		{
			return file.toString('utf8').trim();
		}
		else
		{
			return null;
		}
	}
	catch(e)
	{
		return null;
	}
}
/**
 * Save current metrics values to be used to calculate ratios on next runs
 * Receive:
 * - hostname or ip address
 * - port
 * - retrieved result
 */
function setFile(hostname, port, json)
{
	var dirPath = __dirname + tempDir + "/";
	var filePath = dirPath + ".redis_" + hostname + "_" + port + ".dat";
	if (!fs.existsSync(dirPath))
	{
		try
		{
			fs.mkdirSync(__dirname + tempDir);
		}
		catch(e)
		{
			var ex = new CreateTmpDirError(e.message);
			ex.message = e.message;
			errorHandler(ex);
		}
	}
	try
	{
		fs.writeFileSync(filePath, json);
	}
	catch(e)
	{
		var ex = new WriteOnTmpFileError(e.message);
		ex.message = e.message;
		errorHandler(ex);
	}
}
// ############################################################################
// ERROR HANDLER
/**
 * Used to handle errors of async functions
 * Receive: Error/Exception
 */
function errorHandler(err)
{
	if (err instanceof InvalidAuthenticationError
		|| err instanceof UnknownHostError
		|| err instanceof MetricNotFoundError
		|| err instanceof CreateTmpDirError
		|| err instanceof WriteOnTmpFileError)
	{
		console.log(err.message);
		process.exit(err.code);
	}
	else
	{
		console.log(err.message);
		process.exit(1);
	}
}
// ############################################################################
// EXCEPTIONS
/**
 * Exceptions used in this script.
 */
function InvalidParametersNumberError() {
	this.name = "InvalidParametersNumberError";
	this.message = "Wrong number of parameters.";
	this.code = 3;
}
InvalidParametersNumberError.prototype = Object.create(Error.prototype);
InvalidParametersNumberError.prototype.constructor = InvalidParametersNumberError;
// NOTE: the source dump truncates after the opening line of
// InvalidAuthenticationError. The constructors below follow the same pattern;
// their messages and exit codes are assumed, not taken from the original.
function InvalidAuthenticationError() {
	this.name = "InvalidAuthenticationError";
	this.message = "Invalid authentication."; // assumed
	this.code = 2; // assumed
}
InvalidAuthenticationError.prototype = Object.create(Error.prototype);
InvalidAuthenticationError.prototype.constructor = InvalidAuthenticationError;
function UnknownHostError() {
	this.name = "UnknownHostError";
	this.message = "Unknown host."; // assumed
	this.code = 27; // assumed
}
UnknownHostError.prototype = Object.create(Error.prototype);
UnknownHostError.prototype.constructor = UnknownHostError;
function MetricNotFoundError() {
	this.name = "MetricNotFoundError";
	this.message = "Metric not found."; // assumed
	this.code = 8; // assumed
}
MetricNotFoundError.prototype = Object.create(Error.prototype);
MetricNotFoundError.prototype.constructor = MetricNotFoundError;
function CreateTmpDirError() {
	this.name = "CreateTmpDirError";
	this.message = "Could not create the temporary directory."; // assumed
	this.code = 21; // assumed
}
CreateTmpDirError.prototype = Object.create(Error.prototype);
CreateTmpDirError.prototype.constructor = CreateTmpDirError;
function WriteOnTmpFileError() {
	this.name = "WriteOnTmpFileError";
	this.message = "Could not write on the temporary file."; // assumed
	this.code = 22; // assumed
}
WriteOnTmpFileError.prototype = Object.create(Error.prototype);
WriteOnTmpFileError.prototype.constructor = WriteOnTmpFileError;
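/*
 * Usage sketch, derived from monitorInputProcess() above (host and port are
 * examples; the exact number of metric slots depends on the full metrics
 * table, part of which is missing from this dump):
 *
 *   node redis_performance_monitor.js "1,1,1,1,1,1,1" 127.0.0.1 6379 ""
 *
 * The first argument toggles metrics in declaration order, and the last is
 * the Redis password (an empty string for none).
 */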
script.js

// NOTE: this dump enters script.js in the middle of getPrevDayData()'s fetch
// chain (the function itself appears near the end of the file). The
// fetch(link) opener and the prev_data_array accumulator are reconstructed
// here so the fragment parses; both are implied by the code that follows.
let prev_data_array = [];
fetch(link).then((response) => {
return response.json()
}).then((data) =>{
if(data[statecode]!==undefined)
{
if(data[statecode]["total"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["confirmed"])
}
if(data[statecode]["total"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["recovered"])
}
//if no one died, push zero
if(data[statecode]["total"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["deceased"])
}
//call function to add data to the table (isstatus = 0)
addDataToTable(data_array,prev_data_array,0);
}
}).catch(handleError);
}
function addDataToTable(data_array,prev_data_array,isstatus,isold=0)
{
//retrieve the values from the array
let date=data_array[0];
let active=data_array[1];
let confirmed=data_array[2];
let recovered=data_array[3];
let deaths=data_array[4];
let prev_confirm=prev_data_array[0]
let prev_recoverd=prev_data_array[1]
let prev_deaths=prev_data_array[2]
if(!isold)
{
//calculating the difference
var diff_confirm=(confirmed-prev_confirm);
var diff_deaths=(deaths-prev_deaths);
var diff_recovered=(recovered-prev_recoverd);
}
else
{
var diff_confirm=prev_confirm;
var diff_deaths=prev_deaths;
var diff_recovered =prev_recoverd;
}
// img src
let up_img_src="image/upimg.png";
let down_img_src="image/downimg.png";
let upgreen_img_src="image/upgreenimg.png"
let confirm_pic,active_element,recovered_pic,deaths_pic;
let increases="<br><img src="+up_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
let decreases="<br><img src="+down_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
//check whether to update the status card (isstatus == 1) or the state table
if(isstatus==1)
{
//checking for diff and adding corresponding image for it
if(diff_confirm>0)
{
$("#confirmed-no").append(confirmed+increases+diff_confirm+"</span>");
}
//zero or negative delta shows the down arrow
else if (diff_confirm<=0)
{
$("#confirmed-no").append(confirmed+decreases+Math.abs(diff_confirm)+"</span>");
}
if(diff_deaths>0)
{
$("#deaths-no").append(deaths+increases+diff_deaths+"</span>");
}
else if(diff_deaths<=0)
{
$("#deaths-no").append(deaths+decreases+Math.abs(diff_deaths)+"</span>");
}
if(diff_recovered>0)
{
//setting uparrow img for recovered
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
$("#recovered-no").append(recovered+increases+diff_recovered+"</span>");
}
else if (diff_recovered<=0)
{
$("#recovered-no").append(recovered+decreases+Math.abs(diff_recovered)+"</span>");
}
$("#active-no").append(active);
$("#status-date").text(date);
}
//add data to table
else
{
if(diff_confirm>0)
{
confirm_pic=confirmed+increases+diff_confirm;
}
else if(diff_confirm<=0)
{
confirm_pic=confirmed+decreases+Math.abs(diff_confirm);
}
if(diff_deaths>0)
{
deaths_pic=deaths+increases+Math.abs(diff_deaths);
}
else if(diff_deaths<=0)
{
deaths_pic=deaths+decreases+Math.abs(diff_deaths);
}
if(diff_recovered>0)
{
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
recovered_pic=recovered+increases+diff_recovered;
}
else if(diff_recovered<=0)
{
recovered_pic=recovered+decreases+Math.abs(diff_recovered);
}
//check whether an active count is present
if(active)
{
active_element= "<div class='active-state'>Active<br><span id='active-no'>"+active+"</span></div>"
}
else{
active_element="<div></div>";
}
//retrieve the state name
let state_name=data_array[5]?data_array[5]:"unknown state";
state_name_array.push(state_name.toLowerCase())
let class_name=state_name.split(" ")[0].toLowerCase();
let state_html="<div class='state-div "+class_name+"' > \
<p class='state-name' onclick='goToState(event)'>"+state_name+"<br>"+date+"</p> \
<div class='state-data d-flex flex-wrap justify-content-between' > \
"+active_element+
"<div class='confirmed-state'>Confirmed<br><span id='active-no'>"+confirm_pic+"</span></div>\
<div class='recovered-state'>Recovered<br><span id='active-no'>"+recovered_pic+"</span></div>\
<div class='deaths-state'>Deaths<br><span id='active-no'>"+deaths_pic+"</span></div><br>\
</div>"
$(".state-container").append(state_html);
}
}
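/*
 * Shape of addDataToTable()'s inputs, as read from the code above (all values
 * fabricated for illustration):
 *
 *   data_array      = [date, active, confirmed, recovered, deaths, state?]
 *   prev_data_array = [prev_confirmed, prev_recovered, prev_deaths]
 *
 *   addDataToTable(['15/01/2021', '200', '1000', '750', '50', 'Kerala'],
 *                  ['950', '700', '48'], 0);
 */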
function goToState(event)
{
//innerText returns the element's whole text block
let array=event.target.nextSibling.nextSibling.innerText.split("\n");
	// declare everything up front; the source chained `let a=b=...=0`, which
	// leaks all but the first name as globals, assigned `index` undeclared,
	// and left a stray debugger statement here
	let tactive = 0, tconfirmed = 0, trecovered = 0, tdeaths = 0,
		diff_confirm = 0, diff_recovered = 0, diff_deaths = 0;
	let index;
	if (no_active)
	{
		tactive = "unknown";
		index = 2;
	}
	else
	{
		if (array[1])
		{
			tactive = array[1];
		}
		index = 0;
	}
if(array[3-index])
{
tconfirmed=array[3-index];
}
if(array[4-index])
{
diff_confirm=array[4-index];
}
if(array[6-index])
{
trecovered=array[6-index];
}
if(array[7-index])
{
diff_recovered=array[7-index];
}
if(array[9-index])
{
tdeaths=array[9-index];
}
if(array[10-index])
{
diff_deaths=array[10-index];
}
//store all data as single object
let state_data={"tactive":tactive,"tconfirmed":tconfirmed,"trecovered":trecovered,"tdeaths":tdeaths,
"diff_confirm":diff_confirm,"diff_recovered":diff_recovered,"diff_deaths":diff_deaths};
//getting state
let state_name =event.target.innerText.split("\n")[0];
let prevdate= event.target.innerText.split("\n")[1];
sessionStorage.setItem("state",state_name);
sessionStorage.setItem("prevdate",prevdate);
sessionStorage.setItem("state_data",JSON.stringify(state_data));
window.location="state.html"
}
function searchState()
{
let search_val=$("#search_input").val().toLowerCase();
$(".search_result").css("display","none");
let is_has;
if(state_name_array.length)
{
is_has=false;
}
else
{
is_has=true;
}
$.each(state_name_array,function(index,state_name){
//split the class name if it has space
let class_name=state_name.split(" ")[0]
if(state_name.startsWith(search_val))
{
is_has=true;
$("."+class_name).addClass("show");
$("."+class_name).removeClass("hide");
}
else
{
$("."+class_name).addClass("hide");
}
});
if(!is_has)
{
$(".search_result").css("display","flex");
}
}
function search()
{
let search_button=document.querySelector("#search_button");
search_button.addEventListener("click",async function(){
let loading=document.querySelector(".loading__container");
loading.style["display"]="flex";
let date=$("#search_date").val();
//if user requested date greater than API CLOSED DATE handle error
if(new Date(date)<new Date(API_CLOSED_DATE))
{
await getSpecificData(date);
}
else
{
loading.style["display"]="none";
handleError();
}
});
}
function | getSpecificData | identifier_name |
|
// script.js
let prev_confirm=data["cases_time_series"][prev_index-1]["totalconfirmed"];
let prev_recoverd=data["cases_time_series"][prev_index-1]["totalrecovered"];
let prev_deaths=data["cases_time_series"][prev_index-1]["totaldeceased"];
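// previous-day national totals, used below to compute the day-over-day deltas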
//iterate over the statewise entries only
$.each(data["statewise"],function(index,data){
//storing value for calculation
let date=data["lastupdatedtime"].split(" ")[0];
let active=data["active"];
let confirmed=data["confirmed"];
let recovered=data["recovered"];
let deaths=data["deaths"];
//add today's overall status
if(data["state"]==="Total")
{
//call function with isstatus =1
addDataToTable([date,active,confirmed,recovered,deaths],[prev_confirm,prev_recoverd,prev_deaths],1);
}
//pushing the state details to table
else if(!isstatus)
{
//convert dd/mm/yyyy to yyyy-mm-dd for the API
date=date.split("/").reverse().join("-");
//call function to get prev day data
getPrevDayData(date,data["statecode"],[date,active,confirmed,recovered,deaths,data["state"]])
}
})
}).fail(handleError);
}
catch(error)
{
handleError();
}
}
function getPrevDayData(tdate,statecode,data_array) {
let prevdate=tdate.split("-");
//create a Date object; months are zero-based, hence the minus one
prevdate=new Date(prevdate[0],prevdate[1]-1,prevdate[2]);
//step back one day
prevdate.setDate(prevdate.getDate()-1);
// toLocaleString returns "month/day/year, hh:mm:ss AM" under the default en-US locale (assumed here)
prevdate=prevdate.toLocaleString();
//split off the time part
prevdate=prevdate.split(",")[0];
// split "month/day/year" into its parts
prevdate=prevdate.split("/");
//zero-pad a single-digit month
if(prevdate[0].length==1)
{
prevdate[0]="0"+prevdate[0]
}
//zero-pad a single-digit day
if(prevdate[1].length==1)
{
prevdate[1]="0"+prevdate[1]
}
//reassemble as "YYYY-MM-DD" for the API link
prevdate=prevdate[2]+"-"+prevdate[0]+"-"+prevdate[1];
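// e.g. "12/5/2020, 10:30:00 AM" ends up as "2020-12-05"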
let link="https://api.covid19india.org/v3/data-"+prevdate+".json";
//store prev day data in array
let prev_data_array=[]
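// push order must match addDataToTable's prev_data_array: [confirmed, recovered, deceased]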
fetch(link).then((response) => {
return response.json()
}).then((data) =>{
if(data[statecode]!==undefined)
{
if(data[statecode]["total"]["confirmed"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["confirmed"])
}
if(data[statecode]["total"]["recovered"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["recovered"])
}
//if no one died, push zero
if(data[statecode]["total"]["deceased"]===undefined)
{
prev_data_array.push(0)
}
else
{
prev_data_array.push(data[statecode]["total"]["deceased"])
}
//call the function to add data to the table with isstatus=0
addDataToTable(data_array,prev_data_array,0);
}
}).catch(handleError);
}
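// A locale-independent alternative for the previous-day string (sketch, untested here):
//   const d = new Date(tdate); d.setDate(d.getDate() - 1);
//   const prev = d.toISOString().slice(0, 10); // "YYYY-MM-DD" (note: toISOString is UTC-based)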
function addDataToTable(data_array,prev_data_array,isstatus,isold=0)
{
//unpack the values from the arrays
let date=data_array[0];
let active=data_array[1];
let confirmed=data_array[2];
let recovered=data_array[3];
let deaths=data_array[4];
let prev_confirm=prev_data_array[0]
let prev_recoverd=prev_data_array[1]
let prev_deaths=prev_data_array[2]
if(!isold)
{
//calculating the difference
var diff_confirm=(confirmed-prev_confirm);
var diff_deaths=(deaths-prev_deaths);
var diff_recovered=(recovered-prev_recoverd);
}
else
{
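// when isold is set, prev_data_array already holds the day-over-day deltas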
var diff_confirm=prev_confirm;
var diff_deaths=prev_deaths;
var diff_recovered =prev_recoverd;
}
// img src
let up_img_src="image/upimg.png";
let down_img_src="image/downimg.png";
let upgreen_img_src="image/upgreenimg.png"
let confirm_pic,active_element,recovered_pic,deaths_pic;
let increases="<br><img src="+up_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
let decreases="<br><img src="+down_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
//decide whether to update the status panel or the state table
if(isstatus==1)
{
//checking for diff and adding corresponding image for it
if(diff_confirm>0)
{
$("#confirmed-no").append(confirmed+increases+diff_confirm+"</span>");
}
//zero and negative changes fall through to the down image
else if (diff_confirm<=0)
{
$("#confirmed-no").append(confirmed+decreases+Math.abs(diff_confirm)+"</span>");
}
if(diff_deaths>0)
{
$("#deaths-no").append(deaths+increases+diff_deaths+"</span>");
}
else if(diff_deaths<=0)
{
$("#deaths-no").append(deaths+decreases+Math.abs(diff_deaths)+"</span>");
}
if(diff_recovered>0)
{
//recovered increases use the green up-arrow image
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
$("#recovered-no").append(recovered+increases+diff_recovered+"</span>");
}
else if (diff_recovered<=0)
{
$("#recovered-no").append(recovered+decreases+Math.abs(diff_recovered)+"</span>");
}
$("#active-no").append(active);
$("#status-date").text(date);
}
//add data to table
else
{
if(diff_confirm>0)
{
confirm_pic=confirmed+increases+diff_confirm;
}
else if(diff_confirm<=0)
{
confirm_pic=confirmed+decreases+Math.abs(diff_confirm);
}
if(diff_deaths>0)
{
deaths_pic=deaths+increases+Math.abs(diff_deaths);
}
else if(diff_deaths<=0)
{
deaths_pic=deaths+decreases+Math.abs(diff_deaths);
}
if(diff_recovered>0)
{
let increases="<br><img src="+upgreen_img_src+" height='12px' width='12px'><span style='font-size:.7rem;'>"
recovered_pic=recovered+increases+diff_recovered;
}
else if(diff_recovered<=0)
{
recovered_pic=recovered+decreases+Math.abs(diff_recovered);
}
//check whether an active count is present
if(active)
{
active_element= "<div class='active-state'>Active<br><span id='active-no'>"+active+"</span></div>"
}
else{
active_element="<div></div>";
}
//retrieve the state name
let state_name=data_array[5]?data_array[5]:"unknown state";
state_name_array.push(state_name.toLowerCase())
let class_name=state_name.split(" ")[0].toLowerCase();
let state_html="<div class='state-div "+class_name+"' > \
<p class='state-name' onclick='goToState(event)'>"+state_name+"<br>"+date+"</p> \
<div class='state-data d-flex flex-wrap justify-content-between' > \
"+active_element+
"<div class='confirmed-state'>Confirmed<br><span id='active-no'>"+confirm_pic+"</span></div>\
<div class='recovered-state'>Recovered<br><span id='active-no'>"+recovered_pic+"</span></div>\
<div class='deaths-state'>Deaths<br><span id='active-no'>"+deaths_pic+"</span></div><br>\
</div>"
$(".state-container").append(state_html);
}
}
function goToState(event)
{
//grab the state-data div's full text and split it into lines
let array=event.target.nextSibling.nextSibling.innerText.split("\n");
let tactive=0, tconfirmed=0, trecovered=0, tdeaths=0, diff_confirm=0, diff_recovered=0, diff_deaths=0;
let index;
if(no_active)
{
tactive="unknown";
index=2
}
else{
if(array[1])
{
tactive=array[1];
}
index=0;
}
if(array[3-index])
{
tconfirmed=array[3-index];
}
if(array[4-index])
{
diff_confirm=array[4-index];
}
if(array[6-index])
{
trecovered=array[6-index];
}
if(array[7-index])
{
diff_recovered=array[7-index];
}
if(array[9-index])
{
tdeaths=array[9-index];
}
if(array[10-index])
{
diff_deaths=array[10-index];
}
//store all data as single object
let state_data={"tactive":tactive,"tconfirmed":tconfirmed,"trecovered":trecovered,"tdeaths":tdeaths,
"diff_confirm":diff_confirm,"diff_recovered":diff_recovered,"diff_deaths":diff_deaths};
//getting state
let state_name =event.target.innerText.split("\n")[0];
let prevdate= event.target.innerText.split("\n")[1];
sessionStorage.setItem("state",state_name);
sessionStorage.setItem("prevdate",prevdate);
sessionStorage.setItem("state_data",JSON.stringify(state_data));
window.location="state.html"
}
function searchState()
{
let search_val=$("#search_input").val().toLowerCase();
$(".search_result").css("display","none");
let is_has;
if(state_name_array.length)
{
is_has=false;
}
else
{
is_has=true;
}
$.each(state_name_array,function(index,state_name){
//split the class name if it has space
let class_name=state_name.split(" ")[0]
if(state_name.startsWith(search_val))
{
is_has=true;
$("."+class_name).addClass("show");
$("."+class_name).removeClass("hide");
}
else
{
$("."+class_name).addClass("hide");
}
});
if(!is_has)
{
$(".search_result").css("display","flex");
}
}
function search()
{
let search_button=document.querySelector("#search_button");
search_button.addEventListener("click",async function(){
let loading=document.querySelector(".loading__container");
loading.style["display"]="flex";
let date=$("#search_date").val();
//if user requested date greater than API CLOSED DATE handle error
if(new Date(date)<new Date(API_CLOSED_DATE))
{
await getSpecificData(date);
}
else
{
loading.style["display"]="none";
handleError();
}
});
}
function getSpecificData(date)
| {
//flag the no-active case so goToState parses the text correctly
no_active=1;
$(".state-container").empty();
document.querySelector(".state-container").style.display="flex";
document.querySelector(".error_container").style.display="none";
let link="https://api.covid19india.org/v3/data-"+date+".json";
$.getJSON(link,function(datas){
for(data in datas){
let data_array=[];
let prev_data_array=[];
data_array.push(date);
//push 0 for active since this endpoint has no active count
data_array.push(0)
if(datas[data]["delta"])
{
if(datas[data]["delta"]["confirmed"]===undefined)
{
prev_data_array.push(0) | identifier_body |
# collect-raman.py
parser.add_argument("--scans-to-average", type=int, help="scans to average (default 0)", default=1)
self.args = parser.parse_args()
# grab first spectrometer on the chain
device = usb.core.find(idVendor=0x24aa, idProduct=0x4000)
if device is None:
print("No spectrometers found")
return
self.debug(device)
self.device = device
# claim device (I'm never sure when this is required)
if False:
self.debug("claiming spectrometer")
self.device.set_configuration(1)
usb.util.claim_interface(self.device, 0)
self.debug("claimed device")
# read configuration
self.fw_version = self.get_firmware_version()
self.fpga_version = self.get_fpga_version()
self.read_eeprom()
self.generate_wavelengths()
print(f"Connected to {self.model} {self.serial_number} with {self.pixels} pixels ({self.wavelengths[0]:.2f}, {self.wavelengths[-1]:.2f}nm) ({self.wavenumbers[0]:.2f}, {self.wavenumbers[-1]:.2f}cm-1)")
print(f"ARM {self.fw_version}, FPGA {self.fpga_version}")
def read_eeprom(self):
self.buffers = [self.get_cmd(0xff, 0x01, page) for page in range(8)]
# parse key fields (extend as needed)
self.format = self.unpack((0, 63, 1), "B")
self.model = self.unpack((0, 0, 16), "s")
self.serial_number = self.unpack((0, 16, 16), "s")
self.pixels = self.unpack((2, 16, 2), "H")
self.excitation_nm = self.unpack((3, 36, 4), "f")
self.wavecal_C0 = self.unpack((1, 0, 4), "f")
self.wavecal_C1 = self.unpack((1, 4, 4), "f")
self.wavecal_C2 = self.unpack((1, 8, 4), "f")
self.wavecal_C3 = self.unpack((1, 12, 4), "f")
# unsure whether SiG units ship a laser power calibration, but capture it for when they do
self.laser_power_C0 = self.unpack((3, 12, 4), "f")
self.laser_power_C1 = self.unpack((3, 16, 4), "f")
self.laser_power_C2 = self.unpack((3, 20, 4), "f")
self.laser_power_C3 = self.unpack((3, 24, 4), "f")
self.max_laser_power_mW = self.unpack((3, 28, 4), "f")
self.min_laser_power_mW = self.unpack((3, 32, 4), "f")
def generate_wavelengths(self):
self.wavelengths = []
self.wavenumbers = []
for i in range(self.pixels):
wavelength = self.wavecal_C0 \
+ self.wavecal_C1 * i \
+ self.wavecal_C2 * i * i \
+ self.wavecal_C3 * i * i * i
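# Raman shift in cm^-1 relative to the excitation line (1e7 converts nm to cm)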
wavenumber = 1e7 / self.excitation_nm - 1e7 / wavelength
self.wavelengths.append(wavelength)
self.wavenumbers.append(wavenumber)
############################################################################
# Commands
############################################################################
def run(self):
# disable laser
self.set_laser_enable(False)
# set integration time
self.set_integration_time_ms(self.args.integration_time_ms)
# set gain dB
self.set_gain_db(self.args.gain_db)
# perform one throwaway (seems to help SiG)
self.get_spectrum()
# take dark
if self.args.dark:
print("taking dark")
self.dark = self.get_averaged_spectrum()
# open outfile
if self.args.outfile is not None:
self.outfile = open(self.args.outfile, 'w')
# header rows
self.outfile.write("pixel, %s\n" % (", ".join([str(x) for x in range(self.pixels)])))
self.outfile.write("wavelength, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavelengths])))
self.outfile.write("wavenumber, %s\n" % (", ".join([f"{x:.2f}" for x in self.wavenumbers])))
# enable laser
if self.args.fire_laser:
if self.args.laser_power_perc is not None:
self.set_laser_power_perc(self.args.laser_power_perc)
self.set_laser_enable(True)
else:
print("*** not firing laser because --fire-laser not specified ***")
# take measurements
spectra = []
try:
for i in range(self.args.count):
# take dark-corrected measurement
spectrum = self.get_averaged_spectrum()
if self.dark is not None:
spectrum -= self.dark
spectra.append(spectrum)
# save measurement
now = datetime.now()
print("%s Spectrum %3d/%3d %s ..." % (now, i+1, self.args.count, spectrum[:10]))
if self.outfile is not None:
self.outfile.write("%s, %s\n" % (now, ", ".join([f"{x:.2f}" for x in spectrum])))
# delay before next
sleep(self.args.delay_ms / 1000.0 )
except:
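# the broad except lets execution fall through so the laser is still disabled below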
print("caught exception reading spectra")
traceback.print_exc()
# disable laser
self.set_laser_enable(False)
# close file
if self.outfile is not None:
self.outfile.close()
# graph
if self.args.plot:
for a in spectra:
plt.plot(a)
plt.title(f"integration time {self.args.integration_time_ms}ms, gain {self.args.gain_db}dB, count {self.args.count}")
plt.show()
############################################################################
# opcodes
############################################################################
def get_firmware_version(self):
result = self.get_cmd(0xc0)
if result is not None and len(result) >= 4:
return "%d.%d.%d.%d" % (result[3], result[2], result[1], result[0])
def get_fpga_version(self):
s = ""
result = self.get_cmd(0xb4)
if result is not None:
for i in range(len(result)):
c = result[i]
if 0x20 <= c < 0x7f:
s += chr(c)
return s
def set_laser_enable(self, flag):
print(f"setting laserEnable {flag}")
self.send_cmd(0xbe, 1 if flag else 0)
if flag and self.args.laser_warmup_ms > 0:
print(f"{datetime.now()} starting laser warmup")
sleep(self.args.laser_warmup_ms / 1000.0)
print(f"{datetime.now()} finished laser warmup")
def set_integration_time_ms(self, ms):
if ms < 1 or ms > 0xffff:
print("ERROR: integrationTimeMS requires positive uint16")
return
self.debug(f"setting integrationTimeMS to {ms}")
self.send_cmd(0xb2, ms)
def set_gain_db(self, db):
db = round(db, 1)
msb = int(db)
lsb = int((db - int(db)) * 10)
raw = (msb << 8) | lsb
self.debug("setting gainDB 0x%04x (FunkyFloat)" % raw)
self.send_cmd(0xb7, raw)
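# e.g. 2.5 dB -> msb=2, lsb=5 -> raw=0x0205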
def set_modulation_enable(self, flag):
self.debug(f"setting laserModulationEnable {flag}")
self.send_cmd(0xbd, 1 if flag else 0)
def set_raman_mode(self, flag):
self.debug(f"setting ramanMode {flag}")
self.send_cmd(0xff, 0x16, 1 if flag else 0)
def set_raman_delay_ms(self, ms):
if ms < 0 or ms > 0xffff:
print("ERROR: ramanDelay requires uint16")
return
self.debug(f"setting ramanDelay {ms} ms")
self.send_cmd(0xff, 0x20, ms)
def set_watchdog_sec(self, sec):
if sec < 0 or sec > 0xffff:
print("ERROR: laserWatchdog requires uint16")
return
self.debug(f"setting laserWatchdog {sec} sec")
self.send_cmd(0xff, 0x18, sec)
def get_averaged_spectrum(self):
spectrum = self.get_spectrum()
# assumed completion (the source truncates here): average over the configured number of scans
extra = [self.get_spectrum() for _ in range(self.args.scans_to_average - 1)]
return sum(extra, spectrum) / self.args.scans_to_average
# MT3D_PP_viz.py
def compareAllObs(self):
"""Compare simulated and observed C14 concentrations across all time steps."""
concobj = self.import_concs()
times = concobj.get_times()
scatterx = []
scattery = []
obs_sim_zone_all = []
# each obs_sim_zone entry is [observed, simulated, zone, x, y]
for i in range(self.mf_model.model_data.model_time.t['steps']):
conc = concobj.get_data(totim=times[i])
self.compare_observed('C14', conc, nper=i)
obs_sim_zone_all += self.obs_sim_zone
scatterx = np.array([h[0] for h in obs_sim_zone_all])
scattery = np.array([h[1] for h in obs_sim_zone_all])
# First step is to set up the plot
width = 20
height = 5
multiplier = 1.0
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
ax = fig.add_subplot(1, 3, 1)
ax.set_title('Residuals')
ax.hist([loc[0] - loc[1] for loc in obs_sim_zone_all], bins=20, alpha=0.5)
ax = fig.add_subplot(1, 3, 2)
ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in range(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
if col == nam:
rgb_ref += [rgb_all[index]]
# End if
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# copy the zone's reference colour into the RGB channels
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the alpha channel scales with residual magnitude, so larger errors plot more opaque
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
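# Example driver (assumed usage; construction of the post-processing object happens elsewhere):
#   viz.compareAllObs()                                # C14 scatter and residual summary
#   viz.viewConcsByZone(nper='final', specimen='C14')  # per-aquifer concentration maps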
def viewConcsByZone(self, nper='all', specimen=None):
"""
:param nper: (Default value = 'all')
:param specimen: (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
# mask values outside the valid range and cap the colour bar at 100
min_conc = -100.0
max_conc = vmax
vmax = 100.0
array = modelmap.plot_array(
conc[0], masked_values=[-999.98999023, max_conc, min_conc], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, end = ax.get_xlim()
start = start // 1000 * 1000 + 1000
end = end // 1000 * 1000 - 1000
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax3 = fig.add_axes([0.19, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax3)
ax = fig.add_subplot(2, 4, 6, aspect='equal')
ax.set_title('Renmark')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[5], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax4 = fig.add_axes([0.43, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax4)
ax = fig.add_subplot(2, 4, 7, aspect='equal')
ax.set_title('Basement')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[6], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.0))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax5 = fig.add_axes([0.67, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax5)
fig.subplots_adjust(left=0.01, right=0.95, bottom=0.05, top=0.95, wspace=0.1, hspace=0.12)
plt.show()
# End viewConcsByZone()
def compareAllObs2(self, specimen):
| conc = None
sft_conc = None
obs_group = self.mf_model.model_data.observations.obs_group
obs_sim_zone_all = []
# Write observation to file
for obs_set in obs_group:
obs_sim_zone_all = []
obs_type = obs_group[obs_set]['obs_type']
# Import the required model outputs for processing
if obs_type not in ['concentration', 'EC', 'Radon']:
continue
else:
print("Processing {}".format(obs_set))
if (obs_type == 'concentration') & (specimen == 'C14'):
# Check if model outputs have already been imported and if not import
if not conc: | identifier_body |
|
MT3D_PP_viz.py | (self):
"""TODO: Docs"""
concobj = self.import_concs()
times = concobj.get_times()
scatterx = []
scattery = []
obs_sim_zone_all = []
# The definition of obs_sim_zone looks like:
for i in range(self.mf_model.model_data.model_time.t['steps']):
conc = concobj.get_data(totim=times[i])
self.compare_observed('C14', conc, nper=i)
obs_sim_zone_all += self.obs_sim_zone
scatterx = np.array([h[0] for h in obs_sim_zone_all])
scattery = np.array([h[1] for h in obs_sim_zone_all])
# First step is to set up the plot
width = 20
height = 5
multiplier = 1.0
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
ax = fig.add_subplot(1, 3, 1)
ax.set_title('Residuals')
ax.hist([loc[0] - loc[1] for loc in obs_sim_zone_all], bins=20, alpha=0.5)
ax = fig.add_subplot(1, 3, 2)
ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in xrange(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
if col == nam:
rgb_ref += [rgb_all[index]]
# End if
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# for red the first column needs to be one
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the fourth column needs to be your alphas
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
def viewConcsByZone(self, nper='all', specimen=None):
"""
:param nper: (Default value = 'all')
:param specimen: (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
min_conc = -100.0
max_conc = 100.0
temp = max_conc
max_conc = vmax
vmax = 100.0
array = modelmap.plot_array(
conc[0], masked_values=[-999.98999023, max_conc, min_conc], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, | compareAllObs | identifier_name |
|
MT3D_PP_viz.py | ax.set_title('Sim vs Obs (%d points)' % (len(scatterx)))
comp_zone_plots = {}
colours = ['r', 'orangered', 'y', 'green', 'teal', 'blue', 'fuchsia']
for i in xrange(1, 8):
scatterx2 = [loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
if col == nam:
rgb_ref += [rgb_all[index]]
# End if
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# for red the first column needs to be one
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the fourth column needs to be your alphas
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
def viewConcsByZone(self, nper='all', specimen=None):
"""
:param nper: (Default value = 'all')
:param specimen: (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
min_conc = -100.0
max_conc = 100.0
temp = max_conc
max_conc = vmax
vmax = 100.0
array = modelmap.plot_array(
conc[0], masked_values=[-999.98999023, max_conc, min_conc], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, end = ax.get_xlim()
start = start // 1000 * 1000 + 1000
end = end // 1000 * 1000 - 1000
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax3 = fig.add_axes([0.19, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax3)
ax = fig.add_subplot(2, 4, 6, aspect='equal')
ax.set_title('Renmark')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[5], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set |
ax = fig.add_subplot(1, 3, 2) | random_line_split |
|
MT3D_PP_viz.py | loc[0] for loc in obs_sim_zone_all if loc[2] == float(i)]
scattery2 = [loc[1] for loc in obs_sim_zone_all if loc[2] == float(i)]
# print len(scatterx2), colours[i-1]
comp_zone_plots[i] = ax.scatter(scatterx2, scattery2, edgecolors=colours[
i - 1], facecolors='none', alpha=0.5)
plt.legend((comp_zone_plots[1], comp_zone_plots[2], comp_zone_plots[3],
comp_zone_plots[4], comp_zone_plots[5], comp_zone_plots[6],
comp_zone_plots[7]),
('qa', 'utb', 'utqa', 'utam', 'utaf', 'lta', 'bse'),
scatterpoints=1,
loc='upper left',
ncol=4,
fontsize=11)
plt.xlabel('Observed')
plt.ylabel('Simulated', labelpad=10)
ax.text(150, 75, 'Model Efficiency = %4.2f' % (metric_me(scattery, scatterx)))
ax.text(150, 40, 'PBIAS = %4.2f%%' % (metric_pbias(scattery, scatterx)))
ax.text(150, 20, 'RMSE = %4.2f' % (metric_rmse(scattery, scatterx)))
ax.plot(ax.get_ylim(), ax.get_ylim())
ax = fig.add_subplot(1, 3, 3)
ax.set_title('Residuals in space')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
x = np.array([h[3] for h in obs_sim_zone_all])
y = np.array([h[4] for h in obs_sim_zone_all])
zone = [h[2] for h in obs_sim_zone_all]
residuals = [h[0] - h[1] for h in obs_sim_zone_all]
residuals = np.absolute(residuals)
from matplotlib import colors
import six
colors_ = list(six.iteritems(colors.cnames))
# Add the single letter colors.
for name, rgb in six.iteritems(colors.ColorConverter.colors):
hex_ = colors.rgb2hex(rgb)
colors_.append((name, hex_))
hex_ = [color[1] for color in colors_]
nams = [color[0] for color in colors_]
# Get the rgb equivalent.
rgb_all = [colors.hex2color(color) for color in hex_]
rgb_ref = []
for col in colours:
for index, nam in enumerate(nams):
|
# End for
# End for
zone = np.array(zone)
rgba_colors = np.zeros((len(x), 4))
# for red the first column needs to be one
for i in range(1, 8):
rgba_colors[:, 0][zone == i] = rgb_ref[i - 1][0]
rgba_colors[:, 1][zone == i] = rgb_ref[i - 1][1]
rgba_colors[:, 2][zone == i] = rgb_ref[i - 1][2]
# the fourth column needs to be your alphas
rgba_colors[:, 3] = residuals / np.max(residuals) # alphas
plt.scatter(x, y, color=rgba_colors)
plt.show()
# End compareAllObs()
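    # A more direct way to build the zone colour lookup than the nested
    # name-matching loops in compareAllObs above -- a minimal sketch, assuming
    # matplotlib >= 2.0; illustrative only, not called by the routines here.
    @staticmethod
    def _zone_rgb_lookup(colour_names):
        """Resolve colour names (e.g. 'red', '#ff0000') to RGB triples."""
        from matplotlib.colors import to_rgba
        return [to_rgba(name)[:3] for name in colour_names]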
def viewConcsByZone(self, nper='all', specimen=None):
"""
:param nper: (Default value = 'all')
:param specimen: (Default value = None)
"""
# Create the headfile object
concobj = self.import_concs()
times = concobj.get_times()
if nper == 'all':
conc = concobj.get_alldata()
conc = np.mean(conc, axis=0)
zoned = self.concs_by_zone(conc)
conc = zoned
elif nper == 'final':
conc = concobj.get_data(totim=times[-1])
zoned = self.concs_by_zone(conc)
conc = zoned
else:
conc = concobj.get_data(totim=times[nper])
zoned = self.concs_by_zone(conc)
conc = zoned
# End if
# First step is to set up the plot
width = 20
height = 10
multiplier = 1.
fig = plt.figure(figsize=(width * multiplier, height * multiplier))
vmin = np.amin(conc[conc > 0.])
vmax = np.amax(conc)
ax = fig.add_subplot(2, 4, 1, aspect='equal')
ax.set_title('ibound and bc')
# Next we create an instance of the ModelMap class
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
modelmap.plot_ibound()
modelmap.plot_bc('RIV', plotAll=True)
try:
modelmap.plot_bc('WEL', plotAll=True)
except Exception:
pass
modelmap.plot_bc('GHB', plotAll=True)
modelmap.plot_bc('SFR', plotAll=True)
try:
modelmap.plot_bc('DRN', plotAll=True)
except Exception:
pass
ax.axes.xaxis.set_ticklabels([])
ax = fig.add_subplot(2, 4, 2, aspect='equal')
ax.set_title('Coonambidgal')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
        min_conc = -100.0
        # reuse the data maximum as a mask value and cap the colour scale at 100
        max_conc = vmax
        vmax = 100.0
        array = modelmap.plot_array(
            conc[0], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax2 = fig.add_axes([0.43, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax2)
ax = fig.add_subplot(2, 4, 3, aspect='equal')
ax.set_title('Shepparton')
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[2], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
cbar_ax1 = fig.add_axes([0.67, 0.525, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax1)
ax = fig.add_subplot(2, 4, 5, aspect='equal')
ax.set_title('Calivil')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[4], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
start, end = ax.get_xlim()
start = start // 1000 * 1000 + 1000
end = end // 1000 * 1000 - 1000
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax3 = fig.add_axes([0.19, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax3)
ax = fig.add_subplot(2, 4, 6, aspect='equal')
ax.set_title('Renmark')
# , sr=self.mf.dis.sr, dis=self.mf.dis)
modelmap = flopy.plot.ModelMap(model=self.mf_model.mf)
array = modelmap.plot_array(
conc[5], masked_values=[-999.98999023, max_conc, min_conc, np.nan], alpha=0.5, vmin=vmin, vmax=vmax)
ax.yaxis.set_ticklabels([])
ax.xaxis.set_ticks(np.arange(start, end, 20000.))
ax.xaxis.get_major_formatter().set_powerlimits((0, 1))
cbar_ax4 = fig.add_axes([0.43, 0.055, 0.01, 0.42])
fig.colorbar(array, cax=cbar_ax4)
| if col == nam:
rgb_ref += [rgb_all[index]]
# End if | conditional_block |
linebreak.rs | imal" breaking algorithm in the style of
// Knuth, D.E., and Plass, M.F. "Breaking Paragraphs into Lines." in Software,
// Practice and Experience. Vol. 11, No. 11, November 1981.
// http://onlinelibrary.wiley.com/doi/10.1002/spe.4380111102/pdf
fn break_knuth_plass<'a, T: Clone + Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
// run the algorithm to get the breakpoints
let breakpoints = find_kp_breakpoints(iter.clone(), args);
    // iterate through the breakpoints (note that breakpoints is in reverse break order, so we .rev() it)
let result: std::io::Result<(bool, bool)> = breakpoints.iter().rev().try_fold(
(false, false),
|(mut prev_punct, mut fresh), &(next_break, break_before)| {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
// at each breakpoint, keep emitting words until we find the word matching this breakpoint
for winfo in &mut iter {
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
fresh = false;
prev_punct = winfo.ends_punct;
// We find identical breakpoints here by comparing addresses of the references.
// This is OK because the backing vector is not mutating once we are linebreaking.
let winfo_ptr = winfo as *const _;
let next_break_ptr = next_break as *const _;
if winfo_ptr == next_break_ptr {
// OK, we found the matching word
if break_before {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
} else {
// breaking after this word, so that means "fresh" is true for the next iteration
write_with_spaces(word, slen, args.ostream)?;
fresh = true;
}
break;
} else {
write_with_spaces(word, slen, args.ostream)?;
}
}
Ok((prev_punct, fresh))
},
);
let (mut prev_punct, mut fresh) = result?;
// after the last linebreak, write out the rest of the final line.
for winfo in iter {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
prev_punct = winfo.ends_punct;
fresh = false;
write_with_spaces(word, slen, args.ostream)?;
}
args.ostream.write_all(b"\n")
}
struct LineBreak<'a> {
prev: usize,
linebreak: Option<&'a WordInfo<'a>>,
break_before: bool,
demerits: i64,
prev_rat: f32,
length: usize,
fresh: bool,
}
#[allow(clippy::cognitive_complexity)]
fn find_kp_breakpoints<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
iter: T,
args: &BreakArgs<'a>,
) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut iter = iter.peekable();
// set up the initial null linebreak
let mut linebreaks = vec![LineBreak {
prev: 0,
linebreak: None,
break_before: false,
demerits: 0,
prev_rat: 0.0f32,
length: args.init_len,
fresh: false,
}];
// this vec holds the current active linebreaks; next_ holds the breaks that will be active for
// the next word
let active_breaks = &mut vec![0];
let next_active_breaks = &mut vec![];
let stretch = (args.opts.width - args.opts.goal) as isize;
let minlength = args.opts.goal - stretch as usize;
let mut new_linebreaks = vec![];
let mut is_sentence_start = false;
let mut least_demerits = 0;
loop {
let w = match iter.next() {
None => break,
Some(w) => w,
};
// if this is the last word, we don't add additional demerits for this break
let (is_last_word, is_sentence_end) = match iter.peek() {
None => (true, true),
Some(&&WordInfo {
sentence_start: st,
new_line: nl,
..
}) => (false, st || (nl && w.ends_punct)),
};
// should we be adding extra space at the beginning of the next sentence?
let slen = compute_slen(args.uniform, w.new_line, is_sentence_start, false);
let mut ld_new = i64::MAX;
let mut ld_next = i64::MAX;
let mut ld_idx = 0;
new_linebreaks.clear();
next_active_breaks.clear();
// go through each active break, extending it and possibly adding a new active
// break if we are above the minimum required length
#[allow(clippy::explicit_iter_loop)]
for &i in active_breaks.iter() {
let active = &mut linebreaks[i];
// normalize demerits to avoid overflow, and record if this is the least
active.demerits -= least_demerits;
if active.demerits < ld_next {
ld_next = active.demerits;
ld_idx = i;
}
// get the new length
let tlen = w.word_nchars
+ args.compute_width(w, active.length, active.fresh)
+ slen
+ active.length;
// if tlen is longer than args.opts.width, we drop this break from the active list
// otherwise, we extend the break, and possibly add a new break at this point
if tlen <= args.opts.width {
// this break will still be active next time
next_active_breaks.push(i);
// we can put this word on this line
active.fresh = false;
active.length = tlen;
// if we're above the minlength, we can also consider breaking here
if tlen >= minlength {
let (new_demerits, new_ratio) = if is_last_word {
// there is no penalty for the final line's length
(0, 0.0)
} else {
compute_demerits(
args.opts.goal as isize - tlen as isize,
stretch,
w.word_nchars as isize,
active.prev_rat,
)
};
// do not even consider adding a line that has too many demerits
// also, try to detect overflow by checking signum
let total_demerits = new_demerits + active.demerits;
if new_demerits < BAD_INFTY_SQ
&& total_demerits < ld_new
&& active.demerits.signum() <= new_demerits.signum()
{
ld_new = total_demerits;
new_linebreaks.push(LineBreak {
prev: i,
linebreak: Some(w),
break_before: false,
demerits: total_demerits,
prev_rat: new_ratio,
length: args.indent_len,
fresh: true,
});
}
}
}
}
// if we generated any new linebreaks, add the last one to the list
// the last one is always the best because we don't add to new_linebreaks unless
// it's better than the best one so far
match new_linebreaks.pop() {
None => (),
Some(lb) => {
next_active_breaks.push(linebreaks.len());
linebreaks.push(lb);
}
}
if next_active_breaks.is_empty() {
// every potential linebreak is too long! choose the linebreak with the least demerits, ld_idx
let new_break =
restart_active_breaks(args, &linebreaks[ld_idx], ld_idx, w, slen, minlength);
next_active_breaks.push(linebreaks.len());
linebreaks.push(new_break);
least_demerits = 0;
} else {
// next time around, normalize out the demerits fields
// on active linebreaks to make overflow less likely
least_demerits = cmp::max(ld_next, 0);
}
// swap in new list of active breaks
mem::swap(active_breaks, next_active_breaks);
// If this was the last word in a sentence, the next one must be the first in the next.
is_sentence_start = is_sentence_end;
}
// return the best path
build_best_path(&linebreaks, active_breaks)
}
fn | build_best_path | identifier_name |
|
linebreak.rs |
}
// break_simple implements a "greedy" breaking algorithm: print words until
// maxlength would be exceeded, then print a linebreak and indent and continue.
fn break_simple<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
iter.try_fold((args.init_len, false), |l, winfo| {
accum_words_simple(args, l, winfo)
})?;
args.ostream.write_all(b"\n")
}
fn accum_words_simple<'a>(
args: &mut BreakArgs<'a>,
(l, prev_punct): (usize, bool),
winfo: &'a WordInfo<'a>,
) -> std::io::Result<(usize, bool)> {
// compute the length of this word, considering how tabs will expand at this position on the line
let wlen = winfo.word_nchars + args.compute_width(winfo, l, false);
let slen = compute_slen(
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
if l + wlen + slen > args.opts.width {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
Ok((args.indent_len + winfo.word_nchars, winfo.ends_punct))
} else {
write_with_spaces(winfo.word, slen, args.ostream)?;
Ok((l + wlen + slen, winfo.ends_punct))
}
}
// break_knuth_plass implements an "optimal" breaking algorithm in the style of
// Knuth, D.E., and Plass, M.F. "Breaking Paragraphs into Lines." in Software,
// Practice and Experience. Vol. 11, No. 11, November 1981.
// http://onlinelibrary.wiley.com/doi/10.1002/spe.4380111102/pdf
fn break_knuth_plass<'a, T: Clone + Iterator<Item = &'a WordInfo<'a>>>(
mut iter: T,
args: &mut BreakArgs<'a>,
) -> std::io::Result<()> {
// run the algorithm to get the breakpoints
let breakpoints = find_kp_breakpoints(iter.clone(), args);
    // iterate through the breakpoints (note that breakpoints is in reverse break order, so we .rev() it)
let result: std::io::Result<(bool, bool)> = breakpoints.iter().rev().try_fold(
(false, false),
|(mut prev_punct, mut fresh), &(next_break, break_before)| {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
// at each breakpoint, keep emitting words until we find the word matching this breakpoint
for winfo in &mut iter {
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
fresh = false;
prev_punct = winfo.ends_punct;
// We find identical breakpoints here by comparing addresses of the references.
// This is OK because the backing vector is not mutating once we are linebreaking.
let winfo_ptr = winfo as *const _;
let next_break_ptr = next_break as *const _;
if winfo_ptr == next_break_ptr {
// OK, we found the matching word
if break_before {
write_newline(args.indent_str, args.ostream)?;
write_with_spaces(&winfo.word[winfo.word_start..], 0, args.ostream)?;
} else {
// breaking after this word, so that means "fresh" is true for the next iteration
write_with_spaces(word, slen, args.ostream)?;
fresh = true;
}
break;
} else {
write_with_spaces(word, slen, args.ostream)?;
}
}
Ok((prev_punct, fresh))
},
);
let (mut prev_punct, mut fresh) = result?;
// after the last linebreak, write out the rest of the final line.
for winfo in iter {
if fresh {
write_newline(args.indent_str, args.ostream)?;
}
let (slen, word) = slice_if_fresh(
fresh,
winfo.word,
winfo.word_start,
args.uniform,
winfo.new_line,
winfo.sentence_start,
prev_punct,
);
prev_punct = winfo.ends_punct;
fresh = false;
write_with_spaces(word, slen, args.ostream)?;
}
args.ostream.write_all(b"\n")
}
struct LineBreak<'a> {
prev: usize,
linebreak: Option<&'a WordInfo<'a>>,
break_before: bool,
demerits: i64,
prev_rat: f32,
length: usize,
fresh: bool,
}
#[allow(clippy::cognitive_complexity)]
fn find_kp_breakpoints<'a, T: Iterator<Item = &'a WordInfo<'a>>>(
iter: T,
args: &BreakArgs<'a>,
) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut iter = iter.peekable();
// set up the initial null linebreak
let mut linebreaks = vec![LineBreak {
prev: 0,
linebreak: None,
break_before: false,
demerits: 0,
prev_rat: 0.0f32,
length: args.init_len,
fresh: false,
}];
// this vec holds the current active linebreaks; next_ holds the breaks that will be active for
// the next word
let active_breaks = &mut vec![0];
let next_active_breaks = &mut vec![];
let stretch = (args.opts.width - args.opts.goal) as isize;
let minlength = args.opts.goal - stretch as usize;
let mut new_linebreaks = vec![];
let mut is_sentence_start = false;
let mut least_demerits = 0;
loop {
let w = match iter.next() {
None => break,
Some(w) => w,
};
// if this is the last word, we don't add additional demerits for this break
let (is_last_word, is_sentence_end) = match iter.peek() {
None => (true, true),
Some(&&WordInfo {
sentence_start: st,
new_line: nl,
..
}) => (false, st || (nl && w.ends_punct)),
};
// should we be adding extra space at the beginning of the next sentence?
let slen = compute_slen(args.uniform, w.new_line, is_sentence_start, false);
let mut ld_new = i64::MAX;
let mut ld_next = i64::MAX;
let mut ld_idx = 0;
new_linebreaks.clear();
next_active_breaks.clear();
// go through each active break, extending it and possibly adding a new active
// break if we are above the minimum required length
#[allow(clippy::explicit_iter_loop)]
for &i in active_breaks.iter() {
let active = &mut linebreaks[i];
// normalize demerits to avoid overflow, and record if this is the least
active.demerits -= least_demerits;
if active.demerits < ld_next {
ld_next = active.demerits;
ld_idx = i;
}
// get the new length
let tlen = w.word_nchars
+ args.compute_width(w, active.length, active.fresh)
+ slen
+ active.length;
// if tlen is longer than args.opts.width, we drop this break from the active list
// otherwise, we extend the break, and possibly add a new break at this point
if tlen <= args.opts.width {
// this break will still be active next time
next_active_breaks.push(i);
// we can put this word on this line
active.fresh = false;
active.length = tlen;
// if we're above the minlength, we can also consider breaking here
if tlen >= minlength {
let (new_demerits, new_ratio) = if is_last_word {
// there is no penalty for the final line's length
(0, 0.0)
} else {
compute_demerits(
args.opts.goal as isize - tlen as isize,
stretch,
w.word_nchars as isize,
active.prev_rat,
)
};
// do not even consider adding a line that has too many demerits
// also, try to detect overflow by checking signum
let total_demerits = new_demerits + active.demerits;
if new_demerits < BAD_INFTY_SQ
&& total_demerits < ld_new
&& active.demerits.signum() <= new_demerits.signum()
{
ld_new = total_demerits | {
break_knuth_plass(p_words_words, &mut break_args)
} | conditional_block |
|
linebreak.rs | _sentence_end) = match iter.peek() {
None => (true, true),
Some(&&WordInfo {
sentence_start: st,
new_line: nl,
..
}) => (false, st || (nl && w.ends_punct)),
};
// should we be adding extra space at the beginning of the next sentence?
let slen = compute_slen(args.uniform, w.new_line, is_sentence_start, false);
let mut ld_new = i64::MAX;
let mut ld_next = i64::MAX;
let mut ld_idx = 0;
new_linebreaks.clear();
next_active_breaks.clear();
// go through each active break, extending it and possibly adding a new active
// break if we are above the minimum required length
#[allow(clippy::explicit_iter_loop)]
for &i in active_breaks.iter() {
let active = &mut linebreaks[i];
// normalize demerits to avoid overflow, and record if this is the least
active.demerits -= least_demerits;
if active.demerits < ld_next {
ld_next = active.demerits;
ld_idx = i;
}
// get the new length
let tlen = w.word_nchars
+ args.compute_width(w, active.length, active.fresh)
+ slen
+ active.length;
// if tlen is longer than args.opts.width, we drop this break from the active list
// otherwise, we extend the break, and possibly add a new break at this point
if tlen <= args.opts.width {
// this break will still be active next time
next_active_breaks.push(i);
// we can put this word on this line
active.fresh = false;
active.length = tlen;
// if we're above the minlength, we can also consider breaking here
if tlen >= minlength {
let (new_demerits, new_ratio) = if is_last_word {
// there is no penalty for the final line's length
(0, 0.0)
} else {
compute_demerits(
args.opts.goal as isize - tlen as isize,
stretch,
w.word_nchars as isize,
active.prev_rat,
)
};
// do not even consider adding a line that has too many demerits
// also, try to detect overflow by checking signum
let total_demerits = new_demerits + active.demerits;
if new_demerits < BAD_INFTY_SQ
&& total_demerits < ld_new
&& active.demerits.signum() <= new_demerits.signum()
{
ld_new = total_demerits;
new_linebreaks.push(LineBreak {
prev: i,
linebreak: Some(w),
break_before: false,
demerits: total_demerits,
prev_rat: new_ratio,
length: args.indent_len,
fresh: true,
});
}
}
}
}
// if we generated any new linebreaks, add the last one to the list
// the last one is always the best because we don't add to new_linebreaks unless
// it's better than the best one so far
match new_linebreaks.pop() {
None => (),
Some(lb) => {
next_active_breaks.push(linebreaks.len());
linebreaks.push(lb);
}
}
if next_active_breaks.is_empty() {
// every potential linebreak is too long! choose the linebreak with the least demerits, ld_idx
let new_break =
restart_active_breaks(args, &linebreaks[ld_idx], ld_idx, w, slen, minlength);
next_active_breaks.push(linebreaks.len());
linebreaks.push(new_break);
least_demerits = 0;
} else {
// next time around, normalize out the demerits fields
// on active linebreaks to make overflow less likely
least_demerits = cmp::max(ld_next, 0);
}
// swap in new list of active breaks
mem::swap(active_breaks, next_active_breaks);
// If this was the last word in a sentence, the next one must be the first in the next.
is_sentence_start = is_sentence_end;
}
// return the best path
build_best_path(&linebreaks, active_breaks)
}
fn build_best_path<'a>(paths: &[LineBreak<'a>], active: &[usize]) -> Vec<(&'a WordInfo<'a>, bool)> {
let mut breakwords = vec![];
// of the active paths, we select the one with the fewest demerits
let mut best_idx = match active.iter().min_by_key(|&&a| paths[a].demerits) {
None => crash!(
1,
"Failed to find a k-p linebreak solution. This should never happen."
),
Some(&s) => s,
};
// now, chase the pointers back through the break list, recording
// the words at which we should break
loop {
let next_best = &paths[best_idx];
match next_best.linebreak {
None => return breakwords,
Some(prev) => {
breakwords.push((prev, next_best.break_before));
best_idx = next_best.prev;
}
}
}
}
// "infinite" badness is more like (1+BAD_INFTY)^2 because of how demerits are computed
const BAD_INFTY: i64 = 10_000_000;
const BAD_INFTY_SQ: i64 = BAD_INFTY * BAD_INFTY;
// badness = BAD_MULT * abs(r) ^ 3
const BAD_MULT: f32 = 100.0;
// DR_MULT is multiplier for delta-R between lines
const DR_MULT: f32 = 600.0;
// DL_MULT is penalty multiplier for short words at end of line
const DL_MULT: f32 = 300.0;
fn compute_demerits(delta_len: isize, stretch: isize, wlen: isize, prev_rat: f32) -> (i64, f32) {
// how much stretch are we using?
let ratio = if delta_len == 0 {
0.0f32
} else {
delta_len as f32 / stretch as f32
};
// compute badness given the stretch ratio
let bad_linelen = if ratio.abs() > 1.0f32 {
BAD_INFTY
} else {
(BAD_MULT * ratio.powi(3).abs()) as i64
};
// we penalize lines ending in really short words
let bad_wordlen = if wlen >= stretch {
0
} else {
(DL_MULT
* ((stretch - wlen) as f32 / (stretch - 1) as f32)
.powi(3)
.abs()) as i64
};
// we penalize lines that have very different ratios from previous lines
let bad_delta_r = (DR_MULT * (((ratio - prev_rat) / 2.0).powi(3)).abs()) as i64;
let demerits = i64::pow(1 + bad_linelen + bad_wordlen + bad_delta_r, 2);
(demerits, ratio)
}
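// A quick numeric check of the formula above (illustrative values, not from
// the real test suite): with goal 70 and width 80, stretch = 10, so a line 4
// chars short of goal has ratio 0.4 and badness (100 * 0.4^3) as i64 = 6; if
// the previous ratio was also 0.4 and the last word is long enough, the total
// demerits are (1 + 6)^2 = 49.
#[cfg(test)]
mod compute_demerits_sketch {
    use super::compute_demerits;

    #[test]
    fn small_shortfall_is_cheap() {
        let (demerits, ratio) = compute_demerits(4, 10, 12, 0.4);
        assert!((ratio - 0.4).abs() < 1e-6);
        assert_eq!(demerits, 49);
    }
}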
fn restart_active_breaks<'a>(
args: &BreakArgs<'a>,
active: &LineBreak<'a>,
act_idx: usize,
w: &'a WordInfo<'a>,
slen: usize,
min: usize,
) -> LineBreak<'a> {
let (break_before, line_length) = if active.fresh {
// never break before a word if that word would be the first on a line
(false, args.indent_len)
} else {
// choose the lesser evil: breaking too early, or breaking too late
let wlen = w.word_nchars + args.compute_width(w, active.length, active.fresh);
let underlen = (min - active.length) as isize;
let overlen = ((wlen + slen + active.length) - args.opts.width) as isize;
if overlen > underlen {
// break early, put this word on the next line
(true, args.indent_len + w.word_nchars)
} else {
(false, args.indent_len)
}
};
// restart the linebreak. This will be our only active path.
LineBreak {
prev: act_idx,
linebreak: Some(w),
break_before,
demerits: 0, // this is the only active break, so we can reset the demerit count
prev_rat: if break_before { 1.0 } else { -1.0 },
length: line_length,
fresh: !break_before,
}
}
// Number of spaces to add before a word, based on mode, newline, sentence start.
fn compute_slen(uniform: bool, newline: bool, start: bool, punct: bool) -> usize { | if uniform || newline {
if start || (newline && punct) {
2
} else { | random_line_split |
|
Helper.js | .parts[0].jogByDistance
jogPace = jogPaceFunction(fillerWorkout.parts[0].jogPace)(tempoPace)
jogTime = jogDistance * jogPace
}
const sprintPaceFunction = (sprintPaceString) => new Function('targetPace', sprintPaceString) //same as jogPaceFunction
const sprintPace = sprintPaceFunction(fillerWorkout.parts[0].sprintPace)(targetPace)
return { //pace in s/m, distance in m, time in s
sprintDistance,
jogTime,
jogPace,
sprintPace,
jogDistance
}
}
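// Example of the pace expressions evaluated via new Function above. The
// strings are hypothetical illustrations of what the database rows hold,
// not actual fixture data (paces are in s/m, so offsets are per metre):
//   jogPace:    'return tempoPace + 0.0005'   // jogPaceFunction(str)(tempoPace)
//   sprintPace: 'return targetPace - 0.0003'  // sprintPaceFunction(str)(targetPace)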
const getFartlekTrainingPlan = async (alpha, weekNumber, tempoPace, targetPace) => {
// const fartlek = require('./fartlek.json')
const fartlek = await readJSON('fartlek')
for (let i = 0; i < fartlek.length; i++) {
const fillerWorkout = fartlek[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets: fillerWorkout.parts[0].sprintSets}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
if (alpha < 0.8) {
const sprintSets = fillerWorkout.parts[0].sprintSets + weekNumber - fillerWorkout.parts[0].weekAt
return {...getFartlekWorkout(fillerWorkout, tempoPace, targetPace), sprintSets}
}
const fartlekWorkout = getFartlekWorkout(fillerWorkout, tempoPace, targetPace)
const newSprintPace = fartlekWorkout.sprintPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 0.00250
return {...fartlekWorkout, sprintPace: newSprintPace, sprintSets: fillerWorkout.parts[0].sprintSets}
}
}
}
}
const getLongDistanceTrainingPlan = async (alpha, weekNumber, tempoPace) => {
// const longDistance = require('./longDistance.json')
const longDistance = await readJSON('longDistance')
for (let i = 0; i < longDistance.length; i++) {
const fillerWorkout = longDistance[i]
if (alpha < parseFloat(fillerWorkout.alpha)) {
if (weekNumber === parseFloat(fillerWorkout.parts[0].weekAt)) {
const convertedTempoPace = tempoPace * 1000
return { //runTime in min, tempoPace in s/m, distance in km
runTime: fillerWorkout.parts[0].runTime,
tempoPace,
distance: getRoundedDistance(fillerWorkout.parts[0].runTime, convertedTempoPace)
}
}
if (weekNumber > fillerWorkout.parts[0].weekAt && fillerWorkout.parts[0].end) {
const convertedTempoPace = tempoPace * 1000
const tempoPaceNew = convertedTempoPace - (weekNumber - fillerWorkout.parts[0].weekAt) * 3
const runTime = fillerWorkout.parts[0].runTime
const distance = getRoundedDistance(runTime, tempoPaceNew)
return {distance, runTime, tempoPace: tempoPaceNew / 1000}
}
}
}
}
const getWeeksAndStartDate = (firstWorkoutTimestamp, currentDatestamp) => {
let numberOfWeeksElapsed = 0
let weekStartDatestamp = firstWorkoutTimestamp
while (weekStartDatestamp < currentDatestamp) {
numberOfWeeksElapsed++
        weekStartDatestamp += 604800000 // advance one week per iteration; the old "* numberOfWeeksElapsed" multiplier skipped 1, 2, 3... weeks at a time
}
return {numberOfWeeksElapsed, weekStartDatestamp}
}
const getNextDate = (dateToCompare, previousWorkoutDate) => {
if ((dateToCompare - sanitiseWeekDateStamp(previousWorkoutDate)) < 86400000) return dateToCompare + 86400000
return dateToCompare
}
//todo this function is using test values
const getSuggestedDate = (userInfo, previousWorkout) => {
const sanitisedCurrentDatestamp = sanitiseWeekDateStamp(Date.now())
const {ipptDatestamp} = userInfo
//if close to IPPT date
if ((sanitiseWeekDateStamp(ipptDatestamp) - sanitisedCurrentDatestamp) < (86400000 * 2)) return null
    if (previousWorkout.workout_ID && /^[4]/.test(previousWorkout.workout_ID)) { // was .match(/^4]/)[0], which throws when there is no match
const firstWorkoutTimestamp = parseInt('1622542227000')
const currentDatestamp = Date.now()
let {numberOfWeeksElapsed} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
return getNextDate(nextWeekStart, previousWorkout.date)
}
return getNextDate(sanitisedCurrentDatestamp, previousWorkout.date)
// return getNextDate(sanitisedCurrentDatestamp, Date.now())
}
const getOneOfThreeTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek) => {
const firstWorkoutTimestamp = parseInt('1622542227000')
const {workoutFrequency, ipptDatestamp} = userInfo
const currentDatestamp = Date.now()
userInfo.duration = 8//todo Math.floor(ipptDatestamp - currentDatestamp)
const previousWorkoutDatestamp = previousWorkout ? previousWorkout.date : ''
let {numberOfWeeksElapsed, weekStartDatestamp} = getWeeksAndStartDate(firstWorkoutTimestamp, currentDatestamp)
weekStartDatestamp = sanitiseWeekDateStamp(weekStartDatestamp)
const nextWeekStart = sanitiseWeekDateStamp((604800000 * (numberOfWeeksElapsed + 1)) + firstWorkoutTimestamp)
const tempoPace = getPaces(targetPace, cNewbieGains)[0]
    const isPreviousWorkoutIntervalWorkout = !!(previousWorkout.workout_ID && /^[123]/.test(previousWorkout.workout_ID)) // .test avoids the TypeError that .match(...)[0] throws when nothing matches
if ((ipptDatestamp - currentDatestamp) < 604800000) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
if (workoutFrequency === 1 || !(Object.keys(previousWorkout).length > 0)) return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace);
if (workoutFrequency === 2) {
if (isPreviousWorkoutIntervalWorkout && previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
}
if (workoutFrequency === 3) {
if (previousWorkoutDatestamp > weekStartDatestamp && currentDatestamp < nextWeekStart) {
if (isPreviousWorkoutIntervalWorkout) return getLongDistanceTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace)
return getFartlekTrainingPlan(alpha, numberOfWeeksElapsed, tempoPace, targetPace)
}
}
return getIntervalTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace)
}
export const getTrainingPlan = (questionnaireData, workouts, previousWorkout = {}, previousFitness = 100) => {
const [primary, secondary, pyramid, longDistance, fartlek] = workouts
if (questionnaireData.regular) {
//TBC logic
}
const userInfo = getUserInfo(questionnaireData, previousFitness);
const {alpha, beta, cNewbieGains} = generateConstants(questionnaireData);
const {targetPace, displayPace} = getTargetPaces(userInfo.targetTime);
const suggestedDate = getSuggestedDate(userInfo, previousWorkout)
const {
newFitness,
trainingPlan
} = getOneOfThreeTrainingPlan(targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace, alpha, pyramid, longDistance, fartlek);
return {newFitness, trainingPlan, suggestedDate};
};
export async function getJSON(url) | {
try {
const raw = await fetch(url);
return await raw.json();
} catch (error) {
throw error;
}
} | identifier_body |
|
Helper.js | 2. If workout turns out to be a failure or success
a. if previous workout is success, continue with next workout, change nothing
b. if workout is a failure and fail_count == 0,
i. Set k = 1.2, fail_count++
c. if workout is a failure and fail_count == 1,
i. Set x = P(avg)
d. if workout is a breakthrough and breakthrough_count == 0,
i. breakthrough count++
    e. if workout is a breakthrough and breakthrough_count == 1,
i. Set x = P(avg)
*/
};
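// A minimal sketch of the failure/breakthrough rules described in the comment
// above -- illustrative only; failCount, breakthroughCount, pAvg and k are
// assumed state fields, not part of the original implementation:
const adjustProgression = (result, state) => {
    const next = {...state};
    if (result === 'failure') {
        if (next.failCount === 0) {
            next.k = 1.2;           // ease the next target
            next.failCount += 1;
        } else {
            next.x = next.pAvg;     // reset x to the average performance P(avg)
        }
    } else if (result === 'breakthrough') {
        if (next.breakthroughCount === 0) {
            next.breakthroughCount += 1;
        } else {
            next.x = next.pAvg;
        }
    }
    return next;                    // success: continue with the next workout unchanged
};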
/// for the first time we are calling get_diffs, we use 100. For the second stage, we use the calculated diff
const checkDiff = (diffs, diff) => {
if (diffs[diff]) {
return diffs[diff];
}
return 100;
};
const getDiffs = (velocityToCompare, velocities, intermediateFunc, x = 1, differences = {}) => {
let diffs = {};
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
if (velocityToCompare < teVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") +
x * (deltas[0] * teVelocity * Math.exp(teVelocity - velocityToCompare));
} else if (velocityToCompare < ltVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") -
x * intermediateFunc(deltas[1], teVelocity, velocityToCompare);
} else if (velocityToCompare < vVelocity) {
diffs.ltDiff =
checkDiff(differences, "ltDiff") -
x * intermediateFunc(deltas[2], ltVelocity, velocityToCompare);
} else if (velocityToCompare < stVelocity) {
diffs.vDiff =
checkDiff(differences, "vDiff") -
x * intermediateFunc(deltas[3], vVelocity, velocityToCompare);
// console.log(checkDiff(differences, 'vDiff'))
} else {
diffs.stDiff =
checkDiff(differences, "stDiff") -
x * intermediateFunc(deltas[4], stVelocity, velocityToCompare);
}
return diffs;
};
export const calculateDifficulties = (velocities, currentVelocity) => {
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
const diffs = getDiffs(currentVelocity, velocities, intermediateFunc);
while (Object.keys(diffs).length < 4) {
if (diffs.teDiff && !diffs.ltDiff) {
diffs.ltDiff = diffs.teDiff + intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (diffs.ltDiff && !(diffs.teDiff && diffs.vDiff)) {
if (!diffs.teDiff) {
diffs.teDiff = diffs.ltDiff - intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (!diffs.vDiff) {
diffs.vDiff = diffs.ltDiff + intermediateFunc(deltas[2], ltVelocity, vVelocity);
}
}
if (diffs.vDiff && !(diffs.ltDiff && diffs.stDiff)) {
if (!diffs.ltDiff) {
diffs.ltDiff = diffs.vDiff - intermediateFunc(deltas[2], ltVelocity, vVelocity);
}
if (!diffs.stDiff) {
diffs.stDiff = diffs.vDiff + intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
if (diffs.stDiff && !diffs.vDiff) {
diffs.vDiff = diffs.stDiff - intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
return diffs;
}
export const getSpeedDifficulty = (currentVelocity, targetVelocity, velocities) => {
    //todo why so many diffs floating around? get rid of them
const diffs = calculateDifficulties(velocities, currentVelocity);
const finalDiffs = getDiffs(targetVelocity, velocities, intermediateFunc, -1, diffs);
if (Object.values(finalDiffs).length === 1) {
return Object.values(finalDiffs)[0];
}
return 0;
};
export const generateConstants = (questionnaireData) => {
//todo verify personalbests below
const beta = questionnaireData.regular ? 1 : 0.975;
const alpha = Math.max(
0,
Math.min(
1,
(1 / 3) *
beta *
((questionnaireData.frequency * questionnaireData.distance) / 30 +
questionnaireData.experience / 36 +
questionnaireData.frequency / 3)
)
);
/* old code
Math.min(
1,
(1 / 3) *
beta *
((answers.fFrequency * answers.dDistance) / 30 +
answers.lMonths / 36 +
answers.fFrequency / 3)
)
);
*/
const cNewbieGains = (1 / rho) * Math.exp(1 - alpha) + (rho - 1) / rho;
return {alpha, beta, cNewbieGains};
};
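// Worked example for the constants above (hypothetical questionnaire values;
// rho is the module-level constant used in cNewbieGains):
//   frequency = 3, distance = 5, experience = 18 months, regular = false
//   beta  = 0.975
//   alpha = (1/3) * 0.975 * ((3 * 5) / 30 + 18 / 36 + 3 / 3) = 0.65
//   cNewbieGains = (1 / rho) * e^(1 - 0.65) + (rho - 1) / rho
// const {alpha, cNewbieGains} = generateConstants({regular: false, frequency: 3, distance: 5, experience: 18});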
// todo edit this again
const getBestTrainingPlan = (trainingPlanPrimary, trainingPlanSecondary) =>
trainingPlanPrimary[0] > trainingPlanSecondary[0] /*&&
trainingPlanPrimary[0] - trainingPlanSecondary[0] < 3 &&
trainingPlanPrimary[1]["personalisedDifficultyMultiplier"] <
trainingPlanSecondary[1]["personalisedDifficultyMultiplier"];*/
export function | (questionnaireData, previousFitness) {
const {duration, workoutFrequency} = questionnaireData
//todo fix currentFitness
return {
currentTime: convertToSeconds(questionnaireData.latest),
targetTime: convertToSeconds(questionnaireData.target),
duration,
workoutFrequency,
currentFitness: previousFitness,
};
}
export const generateTrainingPlans = (speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout) => {
const {newFitness, targetDifficulty} = getOverallFitness(
speedDifficulty,
userInfo.duration,
userInfo.currentFitness,
previousWorkout,
);
const getPersonalisedDifficulty = (workout) => {
const temp = JSON.parse(JSON.stringify(workout));
temp.personalisedDifficultyMultiplier =
(speedDifficulty / 100) * workout.difficultyMultiplier * restMultiplier(workout, targetPace); // * 100
return temp;
};
const reducer = (variance, workout) => {
const workoutVariance = Math.abs(workout.personalisedDifficultyMultiplier - targetDifficulty);
if (workoutVariance > variance[0]) {
return variance;
}
return [workoutVariance, workout]; //return [workoutVariance, ...workout];
};
const primaryIntervalsCopy = primary.map(getPersonalisedDifficulty);
const secondaryIntervalsCopy = secondary.map(getPersonalisedDifficulty);
const trainingPlanPrimary = primaryIntervalsCopy.reduce(reducer, [10000]);
const trainingPlanSecondary = secondaryIntervalsCopy.reduce(reducer, [trainingPlanPrimary[1]]);
return {trainingPlanPrimary, trainingPlanSecondary, newFitness};
}
const getIntervalTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace) => {
const velocities = getVelocities(getPaces(targetPace, cNewbieGains));
// velocities in km/hr, paces in s/m
const speedDifficulty = getSpeedDifficulty(convertToVelocity(userInfo.currentTime), convertToVelocity(userInfo.targetTime), velocities); // getSpeedDifficulty(currentVelocity, paces);
const {
trainingPlanPrimary,
trainingPlanSecondary,
newFitness
} = generateTrainingPlans(speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout);
// console.log(JSON.stringify(trainingPlanPrimary), JSON.stringify(trainingPlanSecondary))
let trainingPlan = getBestTrainingPlan(trainingPlanPrimary, trainingPlanSecondary)
? trainingPlanSecondary[1]
: trainingPlanPrimary[1];
trainingPlan.parts[0]["rest"] = getPrescribedRest(trainingPlan.parts[0]["restMultiplier"], targetPace);
trainingPlan.parts[0]["pace"] = displayPace
return {newFitness, trainingPlan};
}
const readJSON = async (name) => {
return fetch("./" + name + ".json")
.then(async response => {
return await response.json();
})
}
const getRoundedDistance = (time, tempoPace) => Math.ceil((time * 60 / tempoPace) / 0.5) * 0.5;
const getFartlekWorkout = (fillerWorkout, tempoPace, targetPace) => {
let jogTime, jogDistance, jogPace
const {sprintDistance} = fillerWorkout.parts[0]
const jogPaceFunction = (jogPaceString) => new Function('tempoPace', jogPaceString)
/* the Python version had a better implementation of jogPaceFunction --> rather than adding a string to the database, we simply create an array like this ['tempoPace', '0.5'] where 0.5 is meant to be added to tempoPace
def get_sprint_pace(sprintPace):
y = 0
| getUserInfo | identifier_name |
Helper.js | 2. If workout turns out to be a failure or success
a. if previous workout is success, continue with next workout, change nothing
b. if workout is a failure and fail_count == 0,
i. Set k = 1.2, fail_count++
c. if workout is a failure and fail_count == 1,
i. Set x = P(avg)
d. if workout is a breakthrough and breakthrough_count == 0,
i. breakthrough count++
    e. if workout is a breakthrough and breakthrough_count == 1,
i. Set x = P(avg)
*/
};
/// for the first time we are calling get_diffs, we use 100. For the second stage, we use the calculated diff
const checkDiff = (diffs, diff) => {
if (diffs[diff]) {
return diffs[diff];
}
return 100;
};
const getDiffs = (velocityToCompare, velocities, intermediateFunc, x = 1, differences = {}) => {
let diffs = {};
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
if (velocityToCompare < teVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") +
x * (deltas[0] * teVelocity * Math.exp(teVelocity - velocityToCompare));
} else if (velocityToCompare < ltVelocity) {
diffs.teDiff =
checkDiff(differences, "teDiff") -
x * intermediateFunc(deltas[1], teVelocity, velocityToCompare);
} else if (velocityToCompare < vVelocity) {
diffs.ltDiff =
checkDiff(differences, "ltDiff") -
x * intermediateFunc(deltas[2], ltVelocity, velocityToCompare);
} else if (velocityToCompare < stVelocity) {
diffs.vDiff =
checkDiff(differences, "vDiff") -
x * intermediateFunc(deltas[3], vVelocity, velocityToCompare);
// console.log(checkDiff(differences, 'vDiff'))
} else {
diffs.stDiff =
checkDiff(differences, "stDiff") -
x * intermediateFunc(deltas[4], stVelocity, velocityToCompare);
}
return diffs;
};
export const calculateDifficulties = (velocities, currentVelocity) => {
const [teVelocity, ltVelocity, vVelocity, stVelocity] = velocities;
const diffs = getDiffs(currentVelocity, velocities, intermediateFunc);
while (Object.keys(diffs).length < 4) {
if (diffs.teDiff && !diffs.ltDiff) {
diffs.ltDiff = diffs.teDiff + intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (diffs.ltDiff && !(diffs.teDiff && diffs.vDiff)) {
if (!diffs.teDiff) {
diffs.teDiff = diffs.ltDiff - intermediateFunc(deltas[1], teVelocity, ltVelocity);
}
if (!diffs.vDiff) {
diffs.vDiff = diffs.ltDiff + intermediateFunc(deltas[2], ltVelocity, vVelocity);
} | }
if (!diffs.stDiff) {
diffs.stDiff = diffs.vDiff + intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
if (diffs.stDiff && !diffs.vDiff) {
diffs.vDiff = diffs.stDiff - intermediateFunc(deltas[3], vVelocity, stVelocity);
}
}
return diffs;
}
export const getSpeedDifficulty = (currentVelocity, targetVelocity, velocities) => {
    //todo why so many diffs floating around? get rid of them
const diffs = calculateDifficulties(velocities, currentVelocity);
const finalDiffs = getDiffs(targetVelocity, velocities, intermediateFunc, -1, diffs);
if (Object.values(finalDiffs).length === 1) {
return Object.values(finalDiffs)[0];
}
return 0;
};
export const generateConstants = (questionnaireData) => {
//todo verify personalbests below
const beta = questionnaireData.regular ? 1 : 0.975;
const alpha = Math.max(
0,
Math.min(
1,
(1 / 3) *
beta *
((questionnaireData.frequency * questionnaireData.distance) / 30 +
questionnaireData.experience / 36 +
questionnaireData.frequency / 3)
)
);
/* old code
Math.min(
1,
(1 / 3) *
beta *
((answers.fFrequency * answers.dDistance) / 30 +
answers.lMonths / 36 +
answers.fFrequency / 3)
)
);
*/
const cNewbieGains = (1 / rho) * Math.exp(1 - alpha) + (rho - 1) / rho;
return {alpha, beta, cNewbieGains};
};
// todo edit this again
const getBestTrainingPlan = (trainingPlanPrimary, trainingPlanSecondary) =>
trainingPlanPrimary[0] > trainingPlanSecondary[0] /*&&
trainingPlanPrimary[0] - trainingPlanSecondary[0] < 3 &&
trainingPlanPrimary[1]["personalisedDifficultyMultiplier"] <
trainingPlanSecondary[1]["personalisedDifficultyMultiplier"];*/
export function getUserInfo(questionnaireData, previousFitness) {
const {duration, workoutFrequency} = questionnaireData
//todo fix currentFitness
return {
currentTime: convertToSeconds(questionnaireData.latest),
targetTime: convertToSeconds(questionnaireData.target),
duration,
workoutFrequency,
currentFitness: previousFitness,
};
}
export const generateTrainingPlans = (speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout) => {
const {newFitness, targetDifficulty} = getOverallFitness(
speedDifficulty,
userInfo.duration,
userInfo.currentFitness,
previousWorkout,
);
const getPersonalisedDifficulty = (workout) => {
const temp = JSON.parse(JSON.stringify(workout));
temp.personalisedDifficultyMultiplier =
(speedDifficulty / 100) * workout.difficultyMultiplier * restMultiplier(workout, targetPace); // * 100
return temp;
};
const reducer = (variance, workout) => {
const workoutVariance = Math.abs(workout.personalisedDifficultyMultiplier - targetDifficulty);
if (workoutVariance > variance[0]) {
return variance;
}
return [workoutVariance, workout]; //return [workoutVariance, ...workout];
};
const primaryIntervalsCopy = primary.map(getPersonalisedDifficulty);
const secondaryIntervalsCopy = secondary.map(getPersonalisedDifficulty);
const trainingPlanPrimary = primaryIntervalsCopy.reduce(reducer, [10000]);
const trainingPlanSecondary = secondaryIntervalsCopy.reduce(reducer, [trainingPlanPrimary[1]]);
return {trainingPlanPrimary, trainingPlanSecondary, newFitness};
}
const getIntervalTrainingPlan = (targetPace, cNewbieGains, userInfo, primary, secondary, previousWorkout, displayPace) => {
const velocities = getVelocities(getPaces(targetPace, cNewbieGains));
// velocities in km/hr, paces in s/m
const speedDifficulty = getSpeedDifficulty(convertToVelocity(userInfo.currentTime), convertToVelocity(userInfo.targetTime), velocities); // getSpeedDifficulty(currentVelocity, paces);
const {
trainingPlanPrimary,
trainingPlanSecondary,
newFitness
} = generateTrainingPlans(speedDifficulty, targetPace, userInfo, primary, secondary, previousWorkout);
// console.log(JSON.stringify(trainingPlanPrimary), JSON.stringify(trainingPlanSecondary))
let trainingPlan = getBestTrainingPlan(trainingPlanPrimary, trainingPlanSecondary)
? trainingPlanSecondary[1]
: trainingPlanPrimary[1];
trainingPlan.parts[0]["rest"] = getPrescribedRest(trainingPlan.parts[0]["restMultiplier"], targetPace);
trainingPlan.parts[0]["pace"] = displayPace
return {newFitness, trainingPlan};
}
const readJSON = async (name) => {
return fetch("./" + name + ".json")
.then(async response => {
return await response.json();
})
}
const getRoundedDistance = (time, tempoPace) => Math.ceil((time * 60 / tempoPace) / 0.5) * 0.5;
const getFartlekWorkout = (fillerWorkout, tempoPace, targetPace) => {
let jogTime, jogDistance, jogPace
const {sprintDistance} = fillerWorkout.parts[0]
const jogPaceFunction = (jogPaceString) => new Function('tempoPace', jogPaceString)
/* the Python version had a better implementation of jogPaceFunction --> rather than adding a string to the database, we simply create an array like this ['tempoPace', '0.5'] where 0.5 is meant to be added to tempoPace
def get_sprint_pace(sprintPace):
y = 0
| }
if (diffs.vDiff && !(diffs.ltDiff && diffs.stDiff)) {
if (!diffs.ltDiff) {
diffs.ltDiff = diffs.vDiff - intermediateFunc(deltas[2], ltVelocity, vVelocity); | random_line_split |
knapsack.py | else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
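# Example of the pivot layout returned above (hypothetical ticker 'AAPL'):
# one row per date, with the columns flattened to 'adj_close-AAPL',
# 'diff-AAPL', 'sma15-AAPL', 'sma60-AAPL', 'rsi-AAPL', 'atr-AAPL'.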
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
    else:
        scaler = joblib.load('data/scaler.pkl')
        # reuse the scaler fitted on training data; calling fit_transform here
        # would refit on the test set and leak its statistics
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the assett
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
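# Quick sanity check of the signal semantics above (illustrative only; assumes
# xdata has more than three time steps):
#   sig = pd.Series(dtype=float)
#   _, t, sig, done = take_action(xdata[0:1, 0:1, :], xdata, action=1, signal=sig, time_step=1)
#   assert sig.loc[2] == 100 and done == 0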
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
        except Exception:
            # not enough history for a two-step window yet; leave reward at 0
            pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size) | new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
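# --- Illustrative only: a minimal epsilon-greedy replay loop consistent with
# the helpers above. The real training loop sits outside this excerpt, so the
# signature and bookkeeping below are assumptions, not the original code.
def train_one_epoch(model, xdata, price_data, epsilon, gamma, buffer, batch_size, replay):
    signal = pd.Series(index=np.arange(xdata.shape[0]))
    state = xdata[0:1, 0:1, :]
    time_step, status = 1, 1
    while status == 1:
        qval = model.predict(state, batch_size=1)
        if np.random.rand() < epsilon:
            action = np.random.randint(0, 4)  # explore
        else:
            action = np.argmax(qval)          # exploit
        new_state, time_step, signal, terminal_state = take_action(
            state, xdata, action, signal, time_step)
        reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
        replay.append((state, action, reward, new_state))
        if len(replay) > buffer:
            replay.pop(0)  # FIFO experience buffer
        # a full implementation would sample a minibatch of batch_size here and
        # fit the model on targets of the form reward + gamma * max(Q(new_state))
        state = new_state
        if terminal_state == 1:
            status = 0
    return replay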
if __name__ == "__main__":
# This neural network is the the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate | action = (np.argmax(qval))
# Take action, observe new state S' | random_line_split |
knapsack.py | else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
| xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
    else:
        scaler = joblib.load('data/scaler.pkl')
        # reuse the scaler fitted on training data; calling fit_transform here
        # would refit on the test set and leak its statistics
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the assett
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
        except Exception:
            # not enough history for a two-step window yet; leave reward at 0
            pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate | filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler() | identifier_body |
knapsack.py | (file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
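# read_file holds out the last 100 rows of each CSV as the test slice and
# returns everything before that as training data.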
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
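# all_init_data builds the full feature tensor: one pivoted frame per CSV in
# util/stock_dfs/, concatenated column-wise, scaled, and reshaped to
# (samples, 1, features) for the LSTM.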
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
        scaler = joblib.load('data/scaler.pkl')
        # use the scaler fitted on training data; do not refit on the test set
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
    # generate the trade signal that is fed to the backtester at evaluation time
    # (the backtester receives the trade signals plus the price data for the asset),
    # make the necessary adjustments to state, and return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
        except Exception:
            # not enough price history at the start of an episode; leave reward at 0
            pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
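# evaluate_Q (below) runs one full greedy episode -- actions are argmax over the
# Q-values, with no epsilon-random exploration -- and returns the final reward.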
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
    # This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add | read_file | identifier_name |
|
knapsack.py |
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
        scaler = joblib.load('data/scaler.pkl')
        # use the scaler fitted on training data; do not refit on the test set
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
    # generate the trade signal that is fed to the backtester at evaluation time
    # (the backtester receives the trade signals plus the price data for the asset),
    # make the necessary adjustments to state, and return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
        except Exception:
            # not enough price history at the start of an episode; leave reward at 0
            pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
    # This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
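    # Hyperparameters: gamma discounts future rewards, epsilon is the initial
    # exploration rate, and buffer/batchSize bound the experience-replay list
    # and its sampling size (used by the training loop of this script).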
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
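    # Q-network: two stacked LSTMs over a single-timestep window of num_features
    # inputs, with a linear 4-unit head producing one Q-value per action.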
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate | return x_test, ticker | conditional_block |
|
DAC16bit.py | self.Halfrange = Halfrange
self.communication_bytes = 3
if numdacs % 4 == 0 and numdacs > 0:
self._numdacs = int(numdacs)
else:
logging.error('Number of dacs needs to be multiple of 4')
# initialize pol_num, the voltage offset due to the polarity
self.pol_num = np.zeros(self._numdacs)
for i in range(int(self._numdacs / 4)):
self.set_pol_dacrack(polarity[i], np.arange(1 + i * 4, 1 + (i + 1) * 4),
get_all=False)
# Add functions
#self.add_function('get_all')
#self.add_function('set_dacs_zero')
#self.add_function('reinitialize_dacs')
for i in range(1, numdacs + 1):
self.add_parameter(
'dac{}'.format(i),
label='Dac {}'.format(i),
unit='mV',
get_cmd=self._gen_ch_get_func(self.do_get_dac, i),
set_cmd=self._gen_ch_set_func(self.do_set_dac, i),
vals=vals.Numbers(self.pol_num[i - 1]-1,
self.pol_num[i - 1] + self.Fullrange+1),
step=dac_step,
delay=dac_delay,
max_val_age=10)
self._open_serial_connection()
#open serial connection
def _open_serial_connection(self):
self.ser = serial.Serial()
self.ser.port = self._interface
self.ser.baudrate = 10000000
self.ser.bytesize = serial.EIGHTBITS #number of bits per bytes
self.ser.parity = serial.PARITY_ODD #set parity check: no parity
self.ser.stopbits = serial.STOPBITS_ONE #number of stop bits
self.ser.timeout = 1 #non-block read
self.ser.xonxoff = False #disable software flow control
self.ser.rtscts = False #disable hardware (RTS/CTS) flow control
self.ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
try:
self.ser.open()
        except Exception:
            logging.warning('Error opening serial port')
            print('error opening serial port')
self.ser.close()
self.ser.open()
raise Exception()
if not self.ser.isOpen():
logging.error('Serial port not open')
print ('serial port not open')
raise Exception()
logging.info('Serial port opened: ' + self.ser.portstr)
# close serial connection
def _close_serial_connection(self):
'''
Closes the serial connection
Input:
None
Output:
None
'''
logging.debug('Closing serial connection')
print ('closing serial connection')
# vpp43.close(self._vi) # OLD
self.ser.close()
def reset(self):
'''
Resets all dacs to 0 volts
Input:
None
Output:
None
'''
logging.info('Resetting instrument')
self.set_dacs_zero()
self.get_all()
def set_dacs_zero(self):
for i in range(self._numdacs):
self.do_set_dac(0,i+1)
def reinitialize_dacs(self):
bytetosend = 0b11100000 #111 is a re init all dacs
message = "%c" % (bytetosend)
reply = self._send_and_read(message.encode(), self.communication_bytes)
return reply
# Conversion of data
def _mvoltage_to_bytes(self, mvoltage):
        '''
        Converts a mvoltage on the configured bipolar scale
        (-Halfrange .. +Halfrange) to its 16-bit integer code
        Input:
            mvoltage (float) : a mvoltage within the DAC range, in mV
        Output:
            bytevalue (int) : the 16-bit integer equivalent
        '''
#//+10V=01111111111111111111
#//-10V=10000000000000000000
# logging.info('mvoltstobytes, voltage:')
# logging.info(mvoltage)
# if(mvoltage>0):
# data = int(((float(mvoltage)/1000)+2)*((2**16-1)/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# #data = data | 0b10000000000000000000
# logging.info("negative, data:")
# #data = bin(data)
# logging.info(data)
#bytevalue = int(round(mvoltage/4000.0*65535))
#dataH = int(bytevalue/256)
#dataL = bytevalue - dataH*256
#return (dataH, dataL)
bytevalue = int(round((mvoltage+self.Halfrange) / self.Fullrange * 65535))
return bytevalue
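    # Worked example (illustrative): with Fullrange=4000 and Halfrange=2000,
    # _mvoltage_to_bytes(0.0) == 32768, i.e. mid-scale on a bipolar +/-2 V rack.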
# def _numbers_to_mvoltages(self, numbers):
# '''
# Converts a list of bytes to a list containing
# the corresponding mvoltages
# '''
# values = np.ones(self._numdacs) #initializes the values array to all ones
# #//calculate the bits to send to the dac out of the input values
# #//D= 20bit input code
# for i in range(self._numdacs):
# bitValue = ((numbers[5 + 3*i]<<8) + (numbers[6 + 3*i]<<0))
# if (bitValue & 0b1000000000000000): #check if the number is positive
# #logging.info(i)
# #logging.info('negative number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=(float(bitValue)/((2**16-1)/2))*(self.Halfrange/1000) #multiply with 2V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# else:
# #logging.info(i)
# #logging.info('positive number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=-(self.Halfrange/1000)+(float(bitValue)/(65536.0/2))*(self.Halfrange/1000) #multiply with 10V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# #values[i] = int(((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)),2)-(1<<20)
# #values[i] = (( 20*((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)) )/ 1048575.0) + 10
# #logging.info('DAC: ')
# #logging.info(numbers[4 + 4*i | self._interface = interface
self.Fullrange = Fullrange | random_line_split |
|
DAC16bit.py | self.ser.open()
raise Exception()
if not self.ser.isOpen():
logging.error('Serial port not open')
print ('serial port not open')
raise Exception()
logging.info('Serial port opened: ' + self.ser.portstr)
# close serial connection
def _close_serial_connection(self):
'''
Closes the serial connection
Input:
None
Output:
None
'''
logging.debug('Closing serial connection')
print ('closing serial connection')
# vpp43.close(self._vi) # OLD
self.ser.close()
def reset(self):
'''
Resets all dacs to 0 volts
Input:
None
Output:
None
'''
logging.info('Resetting instrument')
self.set_dacs_zero()
self.get_all()
def set_dacs_zero(self):
for i in range(self._numdacs):
self.do_set_dac(0,i+1)
def reinitialize_dacs(self):
bytetosend = 0b11100000 #111 is a re init all dacs
message = "%c" % (bytetosend)
reply = self._send_and_read(message.encode(), self.communication_bytes)
return reply
# Conversion of data
def _mvoltage_to_bytes(self, mvoltage):
        '''
        Converts a mvoltage on the configured bipolar scale
        (-Halfrange .. +Halfrange) to its 16-bit integer code
        Input:
            mvoltage (float) : a mvoltage within the DAC range, in mV
        Output:
            bytevalue (int) : the 16-bit integer equivalent
        '''
#//+10V=01111111111111111111
#//-10V=10000000000000000000
# logging.info('mvoltstobytes, voltage:')
# logging.info(mvoltage)
# if(mvoltage>0):
# data = int(((float(mvoltage)/1000)+2)*((2**16-1)/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# #data = data | 0b10000000000000000000
# logging.info("negative, data:")
# #data = bin(data)
# logging.info(data)
#bytevalue = int(round(mvoltage/4000.0*65535))
#dataH = int(bytevalue/256)
#dataL = bytevalue - dataH*256
#return (dataH, dataL)
bytevalue = int(round((mvoltage+self.Halfrange) / self.Fullrange * 65535))
return bytevalue
# def _numbers_to_mvoltages(self, numbers):
# '''
# Converts a list of bytes to a list containing
# the corresponding mvoltages
# '''
# values = np.ones(self._numdacs) #initializes the values array to all ones
# #//calculate the bits to send to the dac out of the input values
# #//D= 20bit input code
# for i in range(self._numdacs):
# bitValue = ((numbers[5 + 3*i]<<8) + (numbers[6 + 3*i]<<0))
# if (bitValue & 0b1000000000000000): #check if the number is positive
# #logging.info(i)
# #logging.info('negative number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=(float(bitValue)/((2**16-1)/2))*(self.Halfrange/1000) #multiply with 2V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# else:
# #logging.info(i)
# #logging.info('positive number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=-(self.Halfrange/1000)+(float(bitValue)/(65536.0/2))*(self.Halfrange/1000) #multiply with 10V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# #values[i] = int(((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)),2)-(1<<20)
# #values[i] = (( 20*((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)) )/ 1048575.0) + 10
# #logging.info('DAC: ')
# #logging.info(numbers[4 + 4*i] )
# #logging.info('Val: ')
# #logging.info(values[i])
# return values
# #return numbers
def _numbers_to_mvoltages(self, byte_mess):
'''
Converts a list of bytes to a list containing
the corresponding mvoltages
'''
values = list(range(self._numdacs))
for i in range(self._numdacs):
# takes two bytes, converts it to a 16 bit int and then divides by
# the range and adds the offset due to the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
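    # Note: this is the inverse of _mvoltage_to_bytes -- code 0 decodes to
    # -Halfrange mV and code 65535 to (Fullrange - Halfrange) mV.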
# Communication with device
def | (self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages[channel - 1]
def do_set_dac(self, mvoltage, channel):
'''
Sets the specified dac to the specified voltage
Input:
mvoltage (float) : output voltage in mV
channel (int) : 1 based index of the dac
Output:
reply (string) : errormessage
'''
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
#logging.info('mvoltage after m_to_bytes: ')
#logging.info(mvoltage)
#logging.info('bin(channel: ')
#logging.info(bin(channel))
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)] #0xff is 255
channel = (int(channel)-1) | 0b10000000 #100 is a write operation
#message = "%c%c%c%c" % (channel,mvoltage_bytes[0 | do_get_dac | identifier_name |
DAC16bit.py | from mV to V and multiply with the resolution, divide by the 10V max
# logging.info("positive, data:")
# #data = bin(int(data) & 0xffffffff)
# logging.info(data)
# else:
# data = int(((float(mvoltage)/1000)+2)*(2**16/2)/(self.Halfrange/1000)) #from mV to V and multiply with the resolution, divide by the 10V max
# #data = data | 0b10000000000000000000
# logging.info("negative, data:")
# #data = bin(data)
# logging.info(data)
#bytevalue = int(round(mvoltage/4000.0*65535))
#dataH = int(bytevalue/256)
#dataL = bytevalue - dataH*256
#return (dataH, dataL)
bytevalue = int(round((mvoltage+self.Halfrange) / self.Fullrange * 65535))
return bytevalue
# def _numbers_to_mvoltages(self, numbers):
# '''
# Converts a list of bytes to a list containing
# the corresponding mvoltages
# '''
# values = np.ones(self._numdacs) #initializes the values array to all ones
# #//calculate the bits to send to the dac out of the input values
# #//D= 20bit input code
# for i in range(self._numdacs):
# bitValue = ((numbers[5 + 3*i]<<8) + (numbers[6 + 3*i]<<0))
# if (bitValue & 0b1000000000000000): #check if the number is positive
# #logging.info(i)
# #logging.info('negative number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=(float(bitValue)/((2**16-1)/2))*(self.Halfrange/1000) #multiply with 2V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# else:
# #logging.info(i)
# #logging.info('positive number bin(:')
# bitValue=bitValue & 0b0111111111111111 #strip the first bit
# #logging.info(bin(bitValue))
# values[i]=-(self.Halfrange/1000)+(float(bitValue)/(65536.0/2))*(self.Halfrange/1000) #multiply with 10V
# #logging.info('values[i]:')
# #logging.info(values[i])
# values[i]=values[i]*1000 # to get to mV
# #logging.info('values[i]*1000:')
# #logging.info(values[i])
# #values[i] = int(((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)),2)-(1<<20)
# #values[i] = (( 20*((numbers[5 + 4*i]<<16) + (numbers[6 + 4*i]<<8) + (numbers[7 + 4*i]<<0)) )/ 1048575.0) + 10
# #logging.info('DAC: ')
# #logging.info(numbers[4 + 4*i] )
# #logging.info('Val: ')
# #logging.info(values[i])
# return values
# #return numbers
def _numbers_to_mvoltages(self, byte_mess):
'''
Converts a list of bytes to a list containing
the corresponding mvoltages
'''
values = list(range(self._numdacs))
for i in range(self._numdacs):
# takes two bytes, converts it to a 16 bit int and then divides by
# the range and adds the offset due to the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
# Communication with device
def do_get_dac(self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages[channel - 1]
def do_set_dac(self, mvoltage, channel):
'''
Sets the specified dac to the specified voltage
Input:
mvoltage (float) : output voltage in mV
channel (int) : 1 based index of the dac
Output:
reply (string) : errormessage
'''
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
#logging.info('mvoltage after m_to_bytes: ')
#logging.info(mvoltage)
#logging.info('bin(channel: ')
#logging.info(bin(channel))
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)] #0xff is 255
channel = (int(channel)-1) | 0b10000000 #100 is a write operation
#message = "%c%c%c%c" % (channel,mvoltage_bytes[0], mvoltage_bytes[1], mvoltage_bytes[2])
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
#logging.info('bin(message: ')
#logging.info(bin(mvoltage_bytes[0]))
#logging.info(bin(mvoltage_bytes[1]))
#logging.info(bin(mvoltage_bytes[2]))
#logging.info('message: ')
#logging.info(message)
reply = self._send_and_read(message, self.communication_bytes)
#logging.info('bin(reply: ')
#logging.info(bin(reply[0]))
#logging.info(bin(reply[1]))
#logging.info(bin(reply[2]))
#logging.info(bin(reply[3]))
        return reply
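    # Wire format used above: one header byte 0b100ccccc (write to dac c),
    # followed by the high and low data bytes of the 16-bit code.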
def do_set_dac_fast(self, mvoltage, channel): #added by Daniel, seems to work
if channel>4:
print('Error: Only channels 1-4 have fast setting.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b11000000 #110 is a write fast operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, 0)
return reply
    def do_ramp_dac(self, mvoltage, channel):  # added by Daniel; currently broken and corrupts the DAC state
| if channel>2:
print('Error: Only channels 1-2 have ramping.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b10100000 #110 is a ramp operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, self.communication_bytes)
return reply | identifier_body |
|
DAC16bit.py | i in range(self._numdacs):
# takes two bytes, converts it to a 16 bit int and then divides by
# the range and adds the offset due to the polarity
values[i] = ((byte_mess[5 + 3 * i] * 256 + byte_mess[6 + 3 * i]) /
65535.0 * self.Fullrange)-self.Halfrange # + self.pol_num[i]
return values
# Communication with device
def do_get_dac(self, channel):
'''
Returns the value of the specified dac
Input:
channel (int) : 1 based index of the dac
Output:
voltage (float) : dacvalue in mV
'''
logging.info('Reading dac%s', channel)
mvoltages = self._get_dacs()
logging.info(mvoltages)
#return mvoltages[channel - 1]
return mvoltages[channel - 1]
def do_set_dac(self, mvoltage, channel):
'''
Sets the specified dac to the specified voltage
Input:
mvoltage (float) : output voltage in mV
channel (int) : 1 based index of the dac
Output:
reply (string) : errormessage
'''
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
#logging.info('mvoltage after m_to_bytes: ')
#logging.info(mvoltage)
#logging.info('bin(channel: ')
#logging.info(bin(channel))
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)] #0xff is 255
channel = (int(channel)-1) | 0b10000000 #100 is a write operation
#message = "%c%c%c%c" % (channel,mvoltage_bytes[0], mvoltage_bytes[1], mvoltage_bytes[2])
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
#logging.info('bin(message: ')
#logging.info(bin(mvoltage_bytes[0]))
#logging.info(bin(mvoltage_bytes[1]))
#logging.info(bin(mvoltage_bytes[2]))
#logging.info('message: ')
#logging.info(message)
reply = self._send_and_read(message, self.communication_bytes)
#logging.info('bin(reply: ')
#logging.info(bin(reply[0]))
#logging.info(bin(reply[1]))
#logging.info(bin(reply[2]))
#logging.info(bin(reply[3]))
return reply
def do_set_dac_fast(self, mvoltage, channel): #added by Daniel, seems to work
if channel>4:
print('Error: Only channels 1-4 have fast setting.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b11000000 #110 is a write fast operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, 0)
return reply
    def do_ramp_dac(self, mvoltage, channel):  # added by Daniel; currently broken and corrupts the DAC state
if channel>2:
print('Error: Only channels 1-2 have ramping.')
else:
logging.info('Setting dac%s to %.04f mV', channel, mvoltage)
mvoltage = self._mvoltage_to_bytes(mvoltage)
mvoltage_bytes = [0,0]
mvoltage_bytes = [mvoltage >> i & 0xff for i in (8,0)]
channel = (int(channel)-1) | 0b10100000 #110 is a ramp operation
message = [channel,mvoltage_bytes[0], mvoltage_bytes[1]]
reply = self._send_and_read(message, self.communication_bytes)
return reply
def do_set_trigger(self):
'''
Sets the trigger; trigger is 1ms and around 4.2V
Input:
none
Output:
reply (string) : errormessage
'''
logging.debug('Trigger out')
message = "%c%c%c%c" % (4, 0, 2, 6)
        reply = self._send_and_read(message.encode(), self.communication_bytes)  # bytestoread was missing; assuming the standard reply length
return reply
def get_dacs(self):
mvoltages = self._get_dacs()
for i in range(self._numdacs):
print('dac{}: '.format(i+1)+str(mvoltages[i]))
return mvoltages
def _get_dacs(self):
'''
Reads from device and returns all dacvoltages in a list
Input:
None
Output:
voltages (float[]) : list containing all dacvoltages (in mV)
'''
logging.debug('Getting dac voltages from instrument')
# first 3 bit are control, last 5 DAC number
message = '\x40' #0b01000000 = 010 = read all dacs
#logging.info(sys.getsizeof(message))
reply = self._send_and_read(message.encode(), self._numdacs*self.communication_bytes+4)
#logging.info(reply)
mvoltages = self._numbers_to_mvoltages(reply)
return mvoltages
def _send_and_read(self, message, bytestoread):
'''
Send <message> to the device and read answer.
Raises an error if one occurred
Returns a list of bytes
Input:
message (string) : string conform the IST_20 protocol
Output:
data_out_numbers (int[]) : return message
'''
logging.info('Sending %r', message)
# clear input buffer
self.ser.flushInput()
#logging.info('Flushed input')
#vpp43.write(self._vi, message) # OLD
self.ser.write(message) # NEW
#logging.info('Wrote Message')
# In stead of blocking, we could also poll, but it's a bit slower
# print visafunc.get_navail(self.lib, self._vi)
# if not visafunc.wait_data(self._vi, 2, 0.5):
# logging.error('Failed to receive reply from IST_20 rack')
# return False
#data1 = visafunc.readn(self._vi, 2) # OLD
#sleep(2)
#logging.info(self.ser.readline())
s=0
data1 = []
while s < bytestoread:
data1.append(ord(self.ser.read()))
#logging.info(s)
s=s+1
#data1 = [ord(s) for s in data1]
#data2 = np.reshape(data1,(-1,4))
#logging.info('finished reading')
#data2 = np.uint32(data1) #from string to 32bit
data2 = data1
#logging.info('converted to uint32')
#logging.info('sendAndRead: %s', data2)
return data2
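    # Example (illustrative): _get_dacs() above sends b'\x40' (the read-all
    # opcode) and reads numdacs*communication_bytes + 4 reply bytes through
    # this helper.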
def set_pol_dacrack(self, flag, channels, get_all=True):
'''
Changes the polarity of the specified set of dacs
Input:
flag (string) : 'BIP', 'POS' or 'NEG'
channel (int) : 0 based index of the rack
get_all (boolean): if True (default) perform a get_all
Output:
None
'''
flagmap = {'NEG': -self.Fullrange, 'BIP': -self.Halfrange, 'POS': 0}
if flag.upper() not in flagmap:
            raise KeyError('Tried to set invalid dac polarity %s' % flag)
val = flagmap[flag.upper()]
for ch in channels:
self.pol_num[ch - 1] = val
# self.set_parameter_bounds('dac%d' % (i+1), val, val +
# self.Fullrange.0)
if get_all:
self.get_all()
def get_pol_dac(self, channel):
'''
Returns the polarity of the dac channel specified
Input:
channel (int) : 1 based index of the dac
Output:
polarity (string) : 'BIP', 'POS' or 'NEG'
'''
val = self.pol_num[channel - 1]
if (val == -self.Fullrange):
return 'NEG'
elif (val == -self.Halfrange):
return 'BIP'
elif (val == 0):
| return 'POS' | conditional_block |
|
gm.go | merge in GM handler groups contributed by other modules
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
var cmdMap = map[string]struct{}{}
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
			// Serious error: the hero goroutine must never panic here
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: " + proto.Cmd))
}
//gogen:iface
func (m *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.BaseY()
return false
})
mapData := m.realmService.GetBigMap().GetMapData()
ux, uy := mapData.GetBlockByPos(heroBaseX, heroBaseY)
startX := ux * mapData.BlockData().XLen
startY := uy * mapData.BlockData().YLen
sequence := regdata.BlockSequence(ux, uy)
var data *regdata.RegionMultiLevelNpcData
for _, data = range m.datas.GetRegionMultiLevelNpcDataArray() {
if int32(data.TypeData.Type) == proto.NpcType {
break
}
}
id := npcid.GetNpcId(sequence, data.Id, npcid.NpcType_MultiLevelMonster)
baseX := startX + data.OffsetBaseX
baseY := startY + data.OffsetBaseY
hc.Send(gm.NewS2cInvaseTargetIdMsg(idbytes.ToBytes(id), u64.Int32(baseX), u64.Int32(baseY)))
}
type hero_near_slice struct {
baseX, baseY int
a []*entity.Hero
}
func (a *hero_near_slice) score(hero *entity.Hero) int {
return imath.Abs(hero.BaseX()-a.baseX) + imath.Abs(hero.BaseY()-a.baseY)
}
func (a *hero_near_slice) Len() int { return len(a.a) }
func (a *hero_near_slice) Swap(i, j int) { a.a[i], a.a[j] = a.a[j], a.a[i] }
func (a *hero_near_slice) Less(i, j int) bool { return a.score(a.a[i]) < a.score(a.a[j]) }
func (m *GmModule) getOrCreateFakeHeroControler(id int64) iface.HeroController {
sender := m.world.GetUserCloseSender(id)
if sender != nil {
u, ok := sender.(iface.ConnectedUser)
if ok {
return u.GetHeroController()
}
} else {
sender = fakeSender
}
return service.NewHeroController(id, sender, "127.0.0.1", 0x100007f, 0, m.heroDataService.NewHeroLocker(id))
}
var fakeSender = &fake_sender{}
type fake_sender struct{}
func (m *fake_sender) Id() int64 { return 0 }
func (m *fake_sender) SendAll(msgs []pbutil.Buffer) {}
func (m *fake_sender) Send(msg pbutil.Buffer) {}
func (m *fake_sender) SendIfFree(msg pbutil.Buffer) {}
func (m *fake_sender) Disconnect(err msg.ErrMsg) {}
func (m *fake | _s | identifier_body |
|
gm.go | WarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: db,
tick: tick,
config: config,
datas: datas,
modules: modules,
heroDataService: dep.HeroData(),
world: dep.World(),
reminderService: reminderService,
realmService: realmService,
heroSnapshotService: dep.HeroSnapshot(),
sharedGuildService: dep.Guild(),
pushService: pushService,
farmService: farmService,
mingcWarService: mingcWarService,
mingcService: mingcService,
clusterService: clusterService, |
if m.config.IsDebug {
if m.config.IsDebugYuanbao {
m.groups = []*gm_group{
{
tab: "常用",
handler: []*gm_handler{
newCmdIntHandler("加元宝(负数表示减)_10", "加元宝(负数表示减)", "100000", func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
m.addYuanbao(amount, hero, result, hc)
result.Changed()
return
})
}),
},
},
}
} else {
m.groups = []*gm_group{
m.newCommonGmGroup(),
m.newDomesticGmGroup(),
m.newGoodsGmGroup(),
m.newSingleGoodsGmGroup(),
m.newSingleEquipmentGmGroup(),
m.newSingleGemGmGroup(),
m.newResetGmGroup(),
m.newTaskGmGroup(),
m.newDungeonGmGroup(),
m.newMailGmGroup(),
m.newLevelGmGroup(),
m.newSceneGmGroup(),
m.newZhanJiangGmGroup(),
m.newMiscGmGroup(),
m.newPrintEquipsGmGroup(),
m.newMingcWarGmGroup(),
m.newMingcGmGroup(),
m.newRedPacketGmGroup(),
m.newCountryGmGroup(),
}
}
}
	// merge in GM handler groups contributed by other modules
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
var cmdMap = map[string]struct{}{}
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
			// Serious error: the hero goroutine must never panic here
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: " + proto.Cmd))
}
//gogen:iface
func (m *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.Base | seasonService: seasonService,
buffService: buffService,
country: country,
gameExporter: gameExporter,
} | random_line_split |
gm.go | WarService, mingcService iface.MingcService,
clusterService *cluster.ClusterService, seasonService iface.SeasonService, gameExporter iface.GameExporter, country iface.CountryService,
tick iface.TickerService) *GmModule {
m := &GmModule{
dep: dep,
time: dep.Time(),
db: db,
tick: tick,
config: config,
datas: datas,
modules: modules,
heroDataService: dep.HeroData(),
world: dep.World(),
reminderService: reminderService,
realmService: realmService,
heroSnapshotService: dep.HeroSnapshot(),
sharedGuildService: dep.Guild(),
pushService: pushService,
farmService: farmService,
mingcWarService: mingcWarService,
mingcService: mingcService,
clusterService: clusterService,
seasonService: seasonService,
buffService: buffService,
country: country,
gameExporter: gameExporter,
}
if m.config.IsDebug {
if m.config.IsDebugYuanbao {
m.groups = []*gm_group{
{
tab: "常用",
handler: []*gm_handler{
newCmdIntHandler("加元宝(负数表示减)_10", "加元宝(负数表示减)", "100000", func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
m.addYuanbao(amount, hero, result, hc)
result.Changed()
return
})
}),
},
},
}
} else {
m.groups = []*gm_group{
m.newCommonGmGroup(),
m.newDomesticGmGroup(),
m.newGoodsGmGroup(),
m.newSingleGoodsGmGroup(),
m.newSingleEquipmentGmGroup(),
m.newSingleGemGmGroup(),
m.newResetGmGroup(),
m.newTaskGmGroup(),
m.newDungeonGmGroup(),
m.newMailGmGroup(),
m.newLevelGmGroup(),
m.newSceneGmGroup(),
m.newZhanJiangGmGroup(),
m.newMiscGmGroup(),
m.newPrintEquipsGmGroup(),
m.newMingcWarGmGroup(),
m.newMingcGmGroup(),
m.newRedPacketGmGroup(),
m.newCountryGmGroup(),
}
}
}
	// merge in GM handler groups contributed by other modules
var i interface{}
i = m
if modules, ok := i.(interface {
initModuleHandler() []*gm_group
}); ok {
m.groups = append(m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
var cmdMap = map[string]struct{}{}
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
			// Serious error: the hero goroutine must never panic here
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: | *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.Base | " + proto.Cmd))
}
//gogen:iface
func (m | conditional_block |
gm.go | (m.groups, modules.initModuleHandler()...)
}
var protoBytes [][]byte
for _, g := range m.groups {
proto := &shared_proto.GmCmdListProto{}
proto.Tab = g.tab
for _, h := range g.handler {
hp := &shared_proto.GmCmdProto{}
hp.Cmd = h.cmd
hp.Desc = h.desc
hp.HasInput = len(h.defaultInput) > 0
hp.DefaultInput = h.defaultInput
proto.Cmd = append(proto.Cmd, hp)
}
protoBytes = append(protoBytes, must.Marshal(proto))
}
m.listCmdMsg = gm.NewS2cListCmdMarshalMsg(protoBytes).Static()
m.hctx = heromodule.NewContext(m.dep, operate_type.GMCmd)
return m
}
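// GmModule wires debug-only GM (game-master) commands into the server: it owns
// the registered handler groups, the prebuilt command-list message, and the
// services that individual handlers act on.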
//gogen:iface
type GmModule struct {
dep iface.ServiceDep
time iface.TimeService
db iface.DbService
tick iface.TickerService
config *kv.IndividualServerConfig
datas *config.ConfigDatas
modules iface.Modules
heroDataService iface.HeroDataService
//sharedGuildService iface.SharedGuildService
world iface.WorldService
country iface.CountryService
reminderService iface.ReminderService
realmService iface.RealmService
heroSnapshotService iface.HeroSnapshotService
sharedGuildService iface.GuildService
pushService iface.PushService
farmService iface.FarmService
mingcWarService iface.MingcWarService
mingcService iface.MingcService
clusterService *cluster.ClusterService
seasonService iface.SeasonService
buffService iface.BuffService
gameExporter iface.GameExporter
groups []*gm_group
listCmdMsg pbutil.Buffer
hctx *heromodule.HeroContext
}
type gm_group struct {
tab string
handler []*gm_handler
}
func newIntHandler(desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, f)
}
func newCmdIntHandler(cmd, desc, defaultInput string, f func(amount int64, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(cmd, desc, defaultInput, func(input string, hc iface.HeroController) {
i, err := strconv.ParseInt(input, 10, 64)
if err != nil {
logrus.WithError(err).Warnf("GM命令收到的input不是数字,cmd:%s input: %s", cmd, input)
}
f(i, hc)
})
}
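// Note: if ParseInt fails, the warning above is logged but f still runs with
// i == 0, so int handlers effectively treat bad input as a zero amount.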
var cmdMap = map[string]struct{}{}
func newCmd(desc string) string {
cmd := desc
if _, exist := cmdMap[cmd]; exist {
for i := 0; i < 1000; i++ {
cmd = fmt.Sprintf("%s_%v", desc, i)
if _, exist := cmdMap[cmd]; !exist {
break
}
}
}
cmdMap[cmd] = struct{}{}
return cmd
}
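// Example (hypothetical descs): registering three handlers that all use the
// desc "addgold" yields the unique cmds "addgold", "addgold_0", "addgold_1".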
func newStringHandler(desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, f)
}
func newHeroIntHandler(desc, defaultInput string, f func(amount int64, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdIntHandler(newCmd(desc), desc, defaultInput, func(amount int64, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(amount, hero, result, hc)
result.Changed()
return
})
})
}
func newHeroStringHandler(desc, defaultInput string, f func(input string, hero *entity.Hero, result herolock.LockResult, hc iface.HeroController)) *gm_handler {
return newCmdStringHandler(newCmd(desc), desc, defaultInput, func(input string, hc iface.HeroController) {
hc.FuncWithSend(func(hero *entity.Hero, result herolock.LockResult) {
f(input, hero, result, hc)
result.Changed()
return
})
})
}
func newCmdStringHandler(cmd, desc, defaultInput string, f func(input string, hc iface.HeroController)) *gm_handler {
h := &gm_handler{}
h.cmd = cmd
h.cmdSpace = cmd + " "
h.desc = desc
h.defaultInput = defaultInput
h.handle = f
return h
}
type gm_handler struct {
cmd string
cmdSpace string
desc string
defaultInput string
handle func(input string, hc iface.HeroController)
}
//gogen:iface c2s_list_cmd
func (m *GmModule) ProcessListCmdMsg(hc iface.HeroController) {
hc.Send(m.listCmdMsg)
}
//gogen:iface
func (m *GmModule) ProcessGmMsg(proto *gm.C2SGmProto, hc iface.HeroController) {
if !m.config.GetIsDebug() {
logrus.Errorf("不是debug模式,但是收到debug消息")
//hc.Disconnect()
return
}
defer func() {
if r := recover(); r != nil {
// Serious error: the hero thread must never panic here.
logrus.WithField("err", r).WithField("stack", string(debug.Stack())).Warn("GmMsg recovered from panic!!! SERIOUS PROBLEM")
metrics.IncPanic()
}
}()
logrus.Debugf("收到GM命令:%s", proto.Cmd)
cmd := strings.TrimSpace(proto.Cmd)
for _, g := range m.groups {
for _, h := range g.handler {
if strings.HasPrefix(cmd, h.cmdSpace) || cmd == h.cmd {
input := ""
if len(cmd) > len(h.cmdSpace) {
input = cmd[len(h.cmdSpace):]
}
h.handle(input, hc)
return
}
}
}
hc.Send(gm.NewS2cGmMsg("GM无效的命令: " + proto.Cmd))
}
//gogen:iface
func (m *GmModule) ProcessInvaseTargetIdMsg(proto *gm.C2SInvaseTargetIdProto, hc iface.HeroController) {
var heroBaseX, heroBaseY int
hc.Func(func(hero *entity.Hero, err error) (heroChanged bool) {
heroBaseX, heroBaseY = hero.BaseX(), hero.BaseY()
return false
})
mapData := m.realmService.GetBigMap().GetMapData()
ux, uy := mapData.GetBlockByPos(heroBaseX, heroBaseY)
startX := ux * mapData.BlockData().XLen
startY := uy * mapData.BlockData().YLen
sequence := regdata.BlockSequence(ux, uy)
var data *regdata.RegionMultiLevelNpcData
for _, data = range m.datas.GetRegionMultiLevelNpcDataArray() {
if int32(data.TypeData.Type) == proto.NpcType {
break
}
}
id := npcid.GetNpcId(sequence, data.Id, npcid.NpcType_MultiLevelMonster)
baseX := startX + data.OffsetBaseX
baseY := startY + data.OffsetBaseY
hc.Send(gm.NewS2cInvaseTargetIdMsg(idbytes.ToBytes(id), u64.Int32(baseX), u64.Int32(baseY)))
}
type hero_near_slice struct {
baseX, baseY int
a []*entity.Hero
}
func (a *hero_near_slice) score(hero *entity.Hero) int {
return imath.Abs(hero.BaseX()-a.baseX) + imath.Abs(hero.BaseY()-a.baseY)
}
func (a *hero_near_slice) Len() int { return len(a.a) }
func (a *hero_near_slice) Swap(i, j int) { a.a[i], a.a[j] = a.a[j], a.a[i] }
func (a *hero_near_slice) Less(i, j int) bool { return a.score(a.a[i]) < a.score(a.a[j]) }
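// Usage sketch (assumed caller): sort candidates by Manhattan distance from
// (baseX, baseY), nearest first:
//
//	s := &hero_near_slice{baseX: x, baseY: y, a: heroes}
//	sort.Sort(s)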
func (m *GmModule) getOrCreateFakeHeroControler(id int64) iface.HeroController {
sender := m.world.GetUserCloseSender(id)
if sender != nil {
u, ok := sender.(iface.ConnectedUser)
if ok {
return u.GetHeroController()
}
} else {
sender = fakeSender
}
return service.NewHeroController(id, sender, "127.0.0.1", 0x100007f, 0, m.heroDataService.NewHeroLocker(id))
}
var fakeSender = &fake_sender{}
type fake_sender struct{}
func (m *fake_sender) Id() int64 { return 0 }
func (m *fake_sender) SendAll(msgs []pbutil.Buffer) {}
func (m *fake_sender) Send(msg pbutil.Buffer) {}
func (m *fake_sender) SendIfFree(msg pbutil.Buffer) {}
func (m *fake_sender) Disconnect(err msg.ErrMsg) {}
func (m *fake_sender) DisconnectAndWait(err msg.ErrMsg) {}
func (m *fake_sender) IsClosed() bool { return false }
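// fake_sender is a no-op stand-in for the sender interface; it lets
// getOrCreateFakeHeroControler drive heroes that have no live connection.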
//func (module *GmModule) processGoodsCm
| identifier_name
|
prune_head_with_taylor.py | max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist
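# Usage sketch (assumed objects): convert processor examples into tensors.
#   features, tokenslist = convert_examples_to_features(
#       eval_examples, label_list, args.max_seq_length, tokenizer)
#   all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)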
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
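# Illustration (hypothetical values): with max_length=6, tokens_a=["a","b","c","d"]
# and tokens_b=["x","y","z"], total 7 > 6, so the longer list pops once:
# tokens_a loses "d", leaving 3 + 3 = 6 tokens.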
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    n_gpu = 1
    # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
    torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
# Load a fine-tuned model
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(
args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare the data
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)[0:args.num_examples]
model.eval()
if args.bert_model.find("base") != -1:
num_head, num_layer = 12, 12
elif args.bert_model.find("large") != -1:
num_head, num_layer = 16, 24
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
imp_head_count = [[0]*num_head for i in range(num_layer)]
prune_head_count = [[0]*num_head for i in range(num_layer)]
all_logits, all
| conditional_block
prune_head_with_taylor.py | if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
# Load a fine-tuned model
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(
args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
| """Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
| identifier_body
|
prune_head_with_taylor.py | max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
# Load a fine-tuned model
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(
args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare the data
eval_segment = "dev_matched" if args.task_name == "mnli" else "dev"
eval_examples = processor.get_dev_examples(
args.data_dir, segment=eval_segment)[0:args.num_examples]
model.eval()
if args.bert_model.find("base") != -1:
num_head, num_layer = 12, 12
elif args.bert_model.find("large") != -1:
num_head, num_layer = 16, 24
eval_loss, eval_result = 0, 0
nb_eval_steps = 0
imp_head_count = [[0]*num_head for i in range(num_layer)]
prune_head_count = [[0]*num_head for i in range(num_layer)]
all_logits, all
| identifier_name
prune_head_with_taylor.py | "rte": RteProcessor,
"sst-2": SstProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"wnli": WnliProcessor,
"sts-b": StsProcessor,
"scitail": ScitailProcessor,
}
num_labels_task = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"rte": 2,
"sst-2": 2,
"qqp": 2,
"qnli": 2,
"wnli": 2,
"sts-b": 1,
"scitail": 2,
}
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label: i for i, label in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
base_tokens = ["[UNK]"] + ["[UNK]"]*len(tokens_a) + ["[UNK]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
base_tokens += ["[UNK]"]*len(tokens_b) + ["[UNK]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == max_seq_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug("*** Example ***")
logger.debug("guid: %s" % (example.guid))
logger.debug("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.debug("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.debug("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.debug(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.debug("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
baseline_ids=baseline_ids))
tokenslist.append({"token":tokens, "golden_label":example.label, "pred_label":None})
return features, tokenslist
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the experimental results will be written.")
parser.add_argument("--model_file",
default=None,
type=str,
required=True,
help="The model file which will be evaluated.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
# pruning head parameters
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size for computing the attention head importance.")
parser.add_argument("--eval_batch_size",
default=200,
type=int,
help="Batch size for evaluation.")
parser.add_argument("--num_examples",
default=200,
type=int,
help="The number of dev examples to compute the attention head importance.")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}".format(
device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu >
| processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor, | random_line_split |
|
family.go | Shaw",
"Snyder",
"Mason",
"Dixon",
"Munoz",
"Hunt",
"Hicks",
"Holmes",
"Palmer",
"Wagner",
"Black",
"Robertson",
"Boyd",
"Rose",
"Stone",
"Salazar",
"Fox",
"Warren",
"Mills",
"Meyer",
"Rice",
"Schmidt",
"Garza",
"Daniels",
"Ferguson",
"Nichols",
"Stephens",
"Soto",
"Weaver",
"Ryan",
"Gardner",
"Payne",
"Grant",
"Dunn",
"Kelley",
"Spencer",
"Hawkins",
"Arnold",
"Pierce",
"Vazquez",
"Hansen",
"Peters",
"Santos",
"Hart",
"Bradley",
"Knight",
"Elliott",
"Cunningham",
"Duncan",
"Armstrong",
"Hudson",
"Carroll",
"Lane",
"Riley",
"Andrews",
"Alvarado",
"Ray",
"Delgado",
"Berry",
"Perkins",
"Hoffman",
"Johnston",
"Matthews",
"Pena",
"Richards",
"Contreras",
"Willis",
"Carpenter",
"Lawrence",
"Sandoval",
"Guerrero",
"George",
"Chapman",
"Rios",
"Estrada",
"Ortega",
"Watkins",
"Greene",
"Nunez",
"Wheeler",
"Valdez",
"Harper",
"Burke",
"Larson",
"Santiago",
"Maldonado",
"Morrison",
"Franklin",
"Carlson",
"Austin",
"Dominguez",
"Carr",
"Lawson",
"Jacobs",
"Obrien",
"Lynch",
"Singh",
"Vega",
"Bishop",
"Montgomery",
"Oliver",
"Jensen",
"Harvey",
"Williamson",
"Gilbert",
"Dean",
"Sims",
"Espinoza",
"Howell",
"Li",
"Wong",
"Reid",
"Hanson",
"Le",
"Mccoy",
"Garrett",
"Burton",
"Fuller",
"Wang",
"Weber",
"Welch",
"Rojas",
"Lucas",
"Marquez",
"Fields",
"Park",
"Yang",
"Little",
"Banks",
"Padilla",
"Day",
"Walsh",
"Bowman",
"Schultz",
"Luna",
"Fowler",
"Mejia",
"Davidson",
"Acosta",
"Brewer",
"May",
"Holland",
"Juarez",
"Newman",
"Pearson",
"Curtis",
"Cortez",
"Douglas",
"Schneider",
"Joseph",
"Barrett",
"Navarro",
"Figueroa",
"Keller",
"Avila",
"Wade",
"Molina",
"Stanley",
"Hopkins",
"Campos",
"Barnett",
"Bates",
"Chambers",
"Caldwell",
"Beck",
"Lambert",
"Miranda",
"Byrd",
"Craig",
"Ayala",
"Lowe",
"Frazier",
"Powers",
"Neal",
"Leonard",
"Gregory",
"Carrillo",
"Sutton",
"Fleming",
"Rhodes",
"Shelton",
"Schwartz",
"Norris",
"Jennings",
"Watts",
"Duran",
"Walters",
"Cohen",
"Mcdaniel",
"Moran",
"Parks",
"Steele",
"Vaughn",
"Becker",
"Holt",
"Deleon",
"Barker",
"Terry",
"Hale",
"Leon",
"Hail",
"Benson",
"Haynes",
"Horton",
"Miles",
"Lyons",
"Pham",
"Graves",
"Bush",
"Thornton",
"Wolfe",
"Warner",
"Cabrera",
"Mckinney",
"Mann",
"Zimmerman",
"Dawson",
"Lara",
"Fletcher",
"Page",
"Mccarthy",
"Love",
"Robles",
"Cervantes",
"Solis",
"Erickson",
"Reeves",
"Chang",
"Klein",
"Salinas",
"Fuentes",
"Baldwin",
"Daniel",
"Simon",
"Velasquez",
"Hardy",
"Higgins",
"Aguirre",
"Lin",
"Cummings",
"Chandler",
"Sharp",
"Barber",
"Bowen",
"Ochoa",
"Dennis",
"Robbins",
"Liu",
"Ramsey",
"Francis",
"Griffith",
"Paul",
"Blair",
"Oconnor",
"Cardenas",
"Pacheco",
"Cross",
"Calderon",
"Quinn",
"Moss",
"Swanson",
"Chan",
"Rivas",
"Khan",
"Rodgers",
"Serrano",
"Fitzgerald",
"Rosales",
"Stevenson",
"Christensen",
"Manning",
"Gill",
"Curry",
"Mclaughlin",
"Harmon",
"Mcgee",
"Gross",
"Doyle",
"Garner",
"Newton",
"Burgess",
"Reese",
"Walton",
"Blake",
"Trujillo",
"Adkins",
"Brady",
"Goodman",
"Roman",
"Webster",
"Goodwin",
"Fischer",
"Huang",
"Potter",
"Delacruz",
"Montoya", | "Wu",
"Hines",
"Mullins",
"Castaneda",
"Malone",
"Cannon",
"Tate",
"Mack",
"Sherman",
"Hubbard",
"Hodges",
"Zhang",
"Guerra",
"Wolf",
"Valencia",
"Saunders",
"Franco",
"Rowe",
"Gallagher",
"Farmer",
"Hammond",
"Hampton",
"Townsend",
"Ingram",
"Wise",
"Gallegos",
"Clarke",
"Barton",
"Schroeder",
"Maxwell",
"Waters",
"Logan",
"Camacho",
"Strickland",
"Norman",
"Person",
"Colon",
"Parsons",
"Frank",
"Harrington",
"Glover",
"Osborne",
"Buchanan",
"Casey",
"Floyd",
"Patton",
"Ibarra",
"Ball",
"Tyler",
"Suarez",
"Bowers",
"Orozco",
"Salas",
"Cobb",
"Gibbs",
"Andrade",
"Bauer",
"Conner",
"Moody",
"Escobar",
"Mcguire",
"Lloyd",
"Mueller",
"Hartman",
"French",
"Kramer",
"Mcbride",
"Pope",
"Lindsey",
"Velazquez",
"Norton",
"Mccormick",
"Sparks",
"Flynn",
"Yates",
"Hogan",
"Marsh",
"Macias",
"Villanueva",
"Zamora",
"Pratt",
"Stokes",
"Owen",
"Ballard",
"Lang",
"Brock",
"Villarreal",
"Charles",
"Drake",
"Barrera",
"Cain",
"Patrick",
"Pineda",
"Burnett",
"Mercado",
"Santana",
"Shepherd",
"Bautista",
"Ali",
"Shaffer",
"Lamb",
"Trevino",
"Mckenzie",
"Hess",
"Beil",
"Olsen",
"Cochran",
"Morton",
"Nash",
"Wilkins",
"Petersen",
"Briggs",
"Shah",
"Roth",
"Nicholson",
"Holloway",
"Lozano",
"Rangel",
"Flowers",
"Hoover",
"Short",
"Arias",
"Mora",
"Valenzuela",
"Bryan",
"Meyers",
"Weiss",
"Underwood",
"Bass",
"Greer",
"Summers",
"Houston",
"Carson",
"Morrow",
"Cl | "Todd", | random_line_split |