Dataset columns:
code: string, lengths 11 to 335k
docstring: string, lengths 20 to 11.8k
func_name: string, lengths 1 to 100
language: string, 1 distinct value
repo: string, 245 distinct values
path: string, lengths 4 to 144
url: string, lengths 43 to 214
license: string, 4 distinct values
func (d *Downloader) DownloadConfig(dir string) (map[string]string, error) { if err := os.MkdirAll(dir, 0755); err != nil { return nil, fmt.Errorf("error creating config dir: %s", err) } paths := make(map[string]string, len(config)) for _, conf := range config { path, err := d.downloadGzippedFile(conf, dir, false) if err != nil { return nil, err } paths[conf] = path } return paths, nil }
DownloadConfig downloads the Flynn config files to the given dir using the TUF client.
DownloadConfig
go
flynn/flynn
host/downloader/downloader.go
https://github.com/flynn/flynn/blob/master/host/downloader/downloader.go
BSD-3-Clause
func (m *Manager) OpenDB() error { if m.dbPath == "" { return nil } m.dbMtx.Lock() defer m.dbMtx.Unlock() // open/initialize db if err := os.MkdirAll(filepath.Dir(m.dbPath), 0755); err != nil { return fmt.Errorf("could not mkdir for volume persistence db: %s", err) } db, err := bolt.Open(m.dbPath, 0600, &bolt.Options{Timeout: 5 * time.Second}) if err != nil { return fmt.Errorf("could not open volume persistence db: %s", err) } if err := db.Update(func(tx *bolt.Tx) error { // idempotently create buckets. (errors ignored because they're all compile-time impossible args checks.) tx.CreateBucketIfNotExists([]byte("volumes")) tx.CreateBucketIfNotExists([]byte("providers")) return nil }); err != nil { return fmt.Errorf("could not initialize volume persistence db: %s", err) } m.db = db if err := m.restore(); err != nil { return err } return m.maybeInitDefaultProvider() }
OpenDB opens and initialises the persistence DB, if not already open.
OpenDB
go
flynn/flynn
host/volume/manager/manager.go
https://github.com/flynn/flynn/blob/master/host/volume/manager/manager.go
BSD-3-Clause
func (m *Manager) CloseDB() error { m.dbMtx.Lock() defer m.dbMtx.Unlock() if m.db == nil { return nil } if err := m.db.Close(); err != nil { return err } m.db = nil return nil }
CloseDB closes the persistence DB. The DB mutex is locked to protect m.db, but also prevents closing the DB when it could still be needed to service API requests (see LockDB).
CloseDB
go
flynn/flynn
host/volume/manager/manager.go
https://github.com/flynn/flynn/blob/master/host/volume/manager/manager.go
BSD-3-Clause
func (m *Manager) LockDB() error { m.dbMtx.RLock() if m.db == nil { m.dbMtx.RUnlock() return ErrDBClosed } return nil }
LockDB acquires a read lock on the DB mutex so that it cannot be closed until the caller has finished performing actions which will lead to changes being persisted to the DB. For example, creating a volume first delegates to the provider to create the volume and then persists to the DB, but if the DB is closed in that time then the volume state will be lost. ErrDBClosed is returned if the DB is already closed so API requests will fail before any actions are performed.
LockDB
go
flynn/flynn
host/volume/manager/manager.go
https://github.com/flynn/flynn/blob/master/host/volume/manager/manager.go
BSD-3-Clause
func (m *Manager) UnlockDB() { m.dbMtx.RUnlock() }
UnlockDB releases a read lock on the DB mutex, previously acquired by a call to LockDB.
UnlockDB
go
flynn/flynn
host/volume/manager/manager.go
https://github.com/flynn/flynn/blob/master/host/volume/manager/manager.go
BSD-3-Clause
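The LockDB/UnlockDB docstrings above describe a read-write-mutex pattern: OpenDB and CloseDB take the exclusive lock, while callers hold the shared lock for the duration of an operation so the DB cannot be closed out from under them. A minimal self-contained sketch of that pattern, assuming illustrative names (store, put) rather than Flynn's API:

package main

import (
    "errors"
    "fmt"
    "sync"
)

var errClosed = errors.New("store closed")

type store struct {
    mtx sync.RWMutex
    db  map[string]string // stands in for the bolt DB handle
}

// lock takes a read lock so the store cannot be closed mid-operation.
func (s *store) lock() error {
    s.mtx.RLock()
    if s.db == nil {
        s.mtx.RUnlock()
        return errClosed
    }
    return nil
}

func (s *store) unlock() { s.mtx.RUnlock() }

// put holds the read lock until the value has been persisted, mirroring how
// volume creation holds LockDB until the new volume has been written out.
func (s *store) put(k, v string) error {
    if err := s.lock(); err != nil {
        return err
    }
    defer s.unlock()
    s.db[k] = v
    return nil
}

// close takes the exclusive lock, so it waits for in-flight operations.
func (s *store) close() {
    s.mtx.Lock()
    defer s.mtx.Unlock()
    s.db = nil
}

func main() {
    s := &store{db: map[string]string{}}
    fmt.Println(s.put("vol1", "zfs"), s.db) // <nil> map[vol1:zfs]
    s.close()
    fmt.Println(s.put("vol2", "zfs")) // store closed
}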
func (m *Manager) persistVolume(tx *bolt.Tx, vol volume.Volume) error { // Save the general volume info volumesBucket := tx.Bucket([]byte("volumes")) id := vol.Info().ID k := []byte(id) _, volExists := m.volumes[id] if !volExists { volumesBucket.Delete(k) } else { b, err := json.Marshal(vol.Info()) if err != nil { return fmt.Errorf("failed to serialize volume info: %s", err) } err = volumesBucket.Put(k, b) if err != nil { return fmt.Errorf("could not persist volume info to boltdb: %s", err) } } // Save any provider-specific metadata associated with the volume. // These are saved per-provider since the deserialization is also only defined per-provider implementation. providerBucket, err := m.getProviderBucket(tx, m.providerIDs[vol.Provider()]) if err != nil { return fmt.Errorf("could not persist provider volume info to boltdb: %s", err) } providerVolumesBucket := providerBucket.Bucket([]byte("volumes")) if !volExists { providerVolumesBucket.Delete(k) } else { b, err := vol.Provider().MarshalVolumeState(id) if err != nil { return fmt.Errorf("failed to serialize provider volume info: %s", err) } err = providerVolumesBucket.Put(k, b) if err != nil { return fmt.Errorf("could not persist provider volume info to boltdb: %s", err) } } return nil }
Called to sync changes to disk when a volume is updated
persistVolume
go
flynn/flynn
host/volume/manager/manager.go
https://github.com/flynn/flynn/blob/master/host/volume/manager/manager.go
BSD-3-Clause
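OpenDB and persistVolume above follow the usual BoltDB flow: open the file, create buckets idempotently, then write JSON-encoded records inside an Update transaction. A minimal sketch of that flow, assuming the bbolt fork of the library (Flynn itself imports the original boltdb package) and an illustrative bucket/key layout:

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "time"

    bolt "go.etcd.io/bbolt"
)

type volumeInfo struct {
    ID   string `json:"id"`
    Type string `json:"type"`
}

func main() {
    db, err := bolt.Open("/tmp/volumes.db", 0600, &bolt.Options{Timeout: 5 * time.Second})
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // idempotently create the bucket, then persist a JSON-encoded record
    if err := db.Update(func(tx *bolt.Tx) error {
        b, err := tx.CreateBucketIfNotExists([]byte("volumes"))
        if err != nil {
            return err
        }
        data, err := json.Marshal(volumeInfo{ID: "vol1", Type: "zfs"})
        if err != nil {
            return err
        }
        return b.Put([]byte("vol1"), data)
    }); err != nil {
        log.Fatal(err)
    }

    // read it back in a read-only transaction
    db.View(func(tx *bolt.Tx) error {
        fmt.Printf("%s\n", tx.Bucket([]byte("volumes")).Get([]byte("vol1")))
        return nil
    })
}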
func (f *ClusterFixer) FindAppReleaseJobs(app, typ string) []map[string]*host.Job { var sortReleases ReleasesByCreate releases := make(map[string]map[string]*host.ActiveJob) // map of releaseID -> hostID -> job ordered // connect to each host, list jobs, find distinct releases for _, h := range f.hosts { jobs, err := h.ListJobs() if err != nil { f.l.Error("error listing jobs", "host", h.ID(), "error", err) continue } for _, j := range jobs { if j.Job.Metadata["flynn-controller.app_name"] != app || j.Job.Metadata["flynn-controller.type"] != typ { continue } id := j.Job.Metadata["flynn-controller.release"] if id == "" { continue } m, ok := releases[id] if !ok { sortReleases = append(sortReleases, SortableRelease{id, j.StartedAt}) m = make(map[string]*host.ActiveJob) releases[id] = m } if curr, ok := m[h.ID()]; ok && curr.StartedAt.Before(j.StartedAt) { continue } jobCopy := j m[h.ID()] = &jobCopy } } sort.Sort(sortReleases) res := make([]map[string]*host.Job, len(sortReleases)) for i, r := range sortReleases { res[i] = make(map[string]*host.Job, len(releases[r.id])) for k, v := range releases[r.id] { res[i][k] = v.Job } } return res }
FindAppReleaseJobs returns a slice with one map of host id to job for each known release of the given app and type, most recent first
FindAppReleaseJobs
go
flynn/flynn
host/fixer/fixer.go
https://github.com/flynn/flynn/blob/master/host/fixer/fixer.go
BSD-3-Clause
func Test(t *testing.T) { TestingT(t) }
Hook gocheck up to the "go test" runner
Test
go
flynn/flynn
host/resource/resource_test.go
https://github.com/flynn/flynn/blob/master/host/resource/resource_test.go
BSD-3-Clause
func (c *ContainerInit) changeState(state State, err string, exitStatus int) { if err != "" { logger.Debug("changing state", "fn", "changeState", "state", state, "err", err) } else if exitStatus != -1 { logger.Debug("changing state", "fn", "changeState", "state", state, "exitStatus", exitStatus) } else { logger.Debug("changing state", "fn", "changeState", "state", state) } c.state = state c.error = err c.exitStatus = exitStatus c.streamsMtx.RLock() defer c.streamsMtx.RUnlock() for ch := range c.streams { ch <- StateChange{State: state, Error: err, ExitStatus: exitStatus} } }
Caller must hold lock
changeState
go
flynn/flynn
host/containerinit/init.go
https://github.com/flynn/flynn/blob/master/host/containerinit/init.go
BSD-3-Clause
func containerInitApp(c *Config, logFile *os.File) error { log := logger.New() init := newContainerInit(c, logFile) log.Debug("registering RPC server") if err := rpcplus.Register(init); err != nil { log.Error("error registering RPC server", "err", err) return err } init.mtx.Lock() defer init.mtx.Unlock() // Prepare the cmd based on the given args // If this fails we report that below cmdPath, cmdErr := getCmdPath(c) cmd := exec.Command(cmdPath, c.Args[1:]...) cmd.Dir = c.WorkDir cmd.Env = make([]string, 0, len(c.Env)) for k, v := range c.Env { cmd.Env = append(cmd.Env, k+"="+v) } // App runs in its own session cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true} if c.Uid != nil || c.Gid != nil { cmd.SysProcAttr.Credential = &syscall.Credential{} if c.Uid != nil { cmd.SysProcAttr.Credential.Uid = *c.Uid } if c.Gid != nil { cmd.SysProcAttr.Credential.Gid = *c.Gid } } // Console setup. Hook up the container app's stdin/stdout/stderr to // either a pty or pipes. The FDs for the controlling side of the // pty/pipes will be passed to flynn-host later via a UNIX socket. if c.TTY { log.Debug("creating PTY") ptyMaster, ptySlave, err := pty.Open() if err != nil { log.Error("error creating PTY", "err", err) return err } init.ptyMaster = ptyMaster cmd.Stdout = ptySlave cmd.Stderr = ptySlave if c.OpenStdin { log.Debug("attaching stdin to PTY") cmd.Stdin = ptySlave cmd.SysProcAttr.Setctty = true } if c.Uid != nil && c.Gid != nil { if err := syscall.Fchown(int(ptySlave.Fd()), int(*c.Uid), int(*c.Gid)); err != nil { log.Error("error changing PTY ownership", "err", err) return err } } } else { // We copy through a socketpair (rather than using cmd.StdoutPipe directly) to make // it easier for flynn-host to do non-blocking I/O (via net.FileConn) so that no // read(2) calls can succeed after closing the logs during an update. // // We also don't assign the socketpair directly to fd 1 because that prevents jobs // using /dev/stdout (calling open(2) on a socket leads to an ENXIO error, see // http://marc.info/?l=ast-users&m=120978595414993). newPipe := func(pipeFn func() (io.ReadCloser, error), name string) (*os.File, error) { pipe, err := pipeFn() if err != nil { return nil, err } if c.Uid != nil && c.Gid != nil { if err := syscall.Fchown(int(pipe.(*os.File).Fd()), int(*c.Uid), int(*c.Gid)); err != nil { return nil, err } } sockR, sockW, err := newSocketPair(name) if err != nil { return nil, err } go func() { defer sockW.Close() for { // copy data from the pipe to the socket using splice(2) // (rather than io.Copy) to avoid a needless copy through // user space n, err := syscall.Splice(int(pipe.(*os.File).Fd()), nil, int(sockW.Fd()), nil, 65535, 0) if err != nil || n == 0 { return } } }() return sockR, nil } log.Debug("creating stdout pipe") var err error init.stdout, err = newPipe(cmd.StdoutPipe, "stdout") if err != nil { log.Error("error creating stdout pipe", "err", err) return err } log.Debug("creating stderr pipe") init.stderr, err = newPipe(cmd.StderrPipe, "stderr") if err != nil { log.Error("error creating stderr pipe", "err", err) return err } if c.OpenStdin { // Can't use cmd.StdinPipe() here, since in Go 1.2 it // returns an io.WriteCloser with the underlying object // being an *exec.closeOnce, neither of which provides // a way to convert to an FD. 
log.Debug("creating stdin pipe") pipeRead, pipeWrite, err := os.Pipe() if err != nil { log.Error("creating stdin pipe", "err", err) return err } cmd.Stdin = pipeRead init.stdin = pipeWrite } } go runRPCServer() // Wait for flynn-host to tell us to start init.mtx.Unlock() // Allow calls log.Debug("waiting to be resumed") <-init.resume log.Debug("resuming") init.mtx.Lock() if c.Hostname != "" { log.Debug("writing /etc/hosts") if err := writeEtcHosts(c.Hostname); err != nil { log.Error("error writing /etc/hosts", "err", err) init.changeState(StateFailed, fmt.Sprintf("error writing /etc/hosts: %s", err), -1) init.exit(1) } } log.Info("starting the job", "args", cmd.Args) if cmdErr != nil { log.Error("error starting the job", "err", cmdErr) init.changeState(StateFailed, cmdErr.Error(), -1) init.exit(1) } if err := cmd.Start(); err != nil { log.Error("error starting the job", "err", err) init.changeState(StateFailed, err.Error(), -1) init.exit(1) } log.Debug("setting state to running") init.process = cmd.Process init.changeState(StateRunning, "", -1) init.mtx.Unlock() // Allow calls // monitor services hbs := make([]discoverd.Heartbeater, 0, len(c.Ports)) for _, port := range c.Ports { if port.Service == nil { continue } log := log.New("name", port.Service.Name, "port", port.Port, "proto", port.Proto) log.Info("monitoring service") hb, err := monitor(port, init, c.Env, log) if err != nil { log.Error("error monitoring service", "err", err) os.Exit(70) } hbs = append(hbs, hb) } exitCode := babySit(init, hbs) log.Info("job exited", "status", exitCode) init.mtx.Lock() init.changeState(StateExited, "", exitCode) init.mtx.Unlock() // Allow calls log.Info("exiting") init.exit(exitCode) return nil }
Run as pid 1 and monitor the contained process to return its exit code.
containerInitApp
go
flynn/flynn
host/containerinit/init.go
https://github.com/flynn/flynn/blob/master/host/containerinit/init.go
BSD-3-Clause
func debugStackPrinter(out io.Writer) { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGUSR2) for range c { pprof.Lookup("goroutine").WriteTo(out, 1) } }
print a full goroutine stack trace to the log fd on SIGUSR2
debugStackPrinter
go
flynn/flynn
host/containerinit/init.go
https://github.com/flynn/flynn/blob/master/host/containerinit/init.go
BSD-3-Clause
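debugStackPrinter above dumps all goroutine stacks via the runtime/pprof "goroutine" profile whenever the process receives SIGUSR2. A minimal standalone version of the same idea that writes the dump to stderr:

package main

import (
    "fmt"
    "os"
    "os/signal"
    "runtime/pprof"
    "syscall"
    "time"
)

func main() {
    c := make(chan os.Signal, 1)
    signal.Notify(c, syscall.SIGUSR2)
    go func() {
        for range c {
            // debug level 1 prints one entry per goroutine with its stack
            pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
        }
    }()
    fmt.Println("pid", os.Getpid(), "- run `kill -USR2 <pid>` to dump goroutine stacks")
    time.Sleep(time.Minute)
}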
func Main() { logR, logW, err := newSocketPair("log") if err != nil { os.Exit(70) } go debugStackPrinter(logW) config := &Config{} data, err := ioutil.ReadFile("/.containerconfig") if err != nil { os.Exit(70) } if err := json.Unmarshal(data, config); err != nil { os.Exit(70) } logger = log15.New("component", "containerinit") logger.SetHandler(log15.LvlFilterHandler(config.LogLevel, log15.StreamHandler(logW, log15.LogfmtFormat()))) // Propagate the plugin-specific container env variable config.Env["container"] = os.Getenv("container") if err := containerInitApp(config, logR); err != nil { os.Exit(70) } }
This code is run INSIDE the container and is responsible for setting up the environment before running the actual process
Main
go
flynn/flynn
host/containerinit/init.go
https://github.com/flynn/flynn/blob/master/host/containerinit/init.go
BSD-3-Clause
func newLogger(tty bool, file string, verbose bool) log15.Logger { stdoutFormat := log15.LogfmtFormat() if tty { stdoutFormat = log15.TerminalFormat() } stdoutHandler := log15.StreamHandler(os.Stdout, stdoutFormat) if !verbose { stdoutHandler = log15.LvlFilterHandler(log15.LvlInfo, stdoutHandler) } log := log15.New() log.SetHandler(log15.MultiHandler( log15.Must.FileHandler(file, log15.LogfmtFormat()), stdoutHandler, )) return log }
newLogger returns a log15.Logger which writes to stdout and a log file
newLogger
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func (b *Builder) Build(images []*Image) error { builds := make(map[string]*Build, len(images)) for _, image := range images { builds[image.ID] = NewBuild(image) } addDependency := func(build *Build, dependsOn string) error { dep, ok := builds[dependsOn] if !ok { return fmt.Errorf("unknown image dependency: %s -> %s", build.Image.ID, dependsOn) } build.AddDependency(dep) dep.AddDependent(build) return nil } for _, build := range builds { image := build.Image // determine build dependencies // TODO: check for circular dependencies if image.Base != "" { addDependency(build, image.Base) } for _, l := range image.Layers { if len(l.ProtoBuild) > 0 && l.BuildWith == "" { l.BuildWith = "protoc" } // build Go binaries using the Go image if len(l.GoBuild) > 0 || len(l.CGoBuild) > 0 || len(l.GoBin) > 0 { if l.BuildWith == "" { l.BuildWith = "go" } l.Inputs = append(l.Inputs, "go.mod", "go.sum") } if l.BuildWith != "" { addDependency(build, l.BuildWith) } } } // build images until there are no pending builds left done := make(chan *Build, len(builds)) failures := make(map[string]error) for len(builds) > 0 { for _, build := range builds { // if the build has no more pending dependencies, build it if len(build.Dependencies) == 0 { build.Once.Do(func() { // if the build is aborted due to a dependency // failure, just send it to the done channel if build.Abort { b.log.Debug(fmt.Sprintf("%s build abort", build.Image.ID)) done <- build return } b.log.Debug(fmt.Sprintf("%s build start", build.Image.ID)) go func(build *Build) { build.StartedAt = time.Now() build.Err = b.BuildImage(build.Image) done <- build }(build) }) } } // wait for a build to finish build := <-done b.bar.Increment() if build.Err == nil { b.log.Debug(fmt.Sprintf("%s build done", build.Image.ID), "duration", time.Since(build.StartedAt)) } else { b.log.Error(fmt.Sprintf("%s build error", build.Image.ID), "duration", time.Since(build.StartedAt), "err", build.Err) } // remove from the pending list delete(builds, build.Image.ID) // remove the build as a pending dependency from all // dependents for dependent := range build.Dependents { // if the build failed or was aborted, abort the // dependent builds if build.Err != nil || build.Abort { dependent.Abort = true } dependent.RemoveDependency(build) } if build.Err != nil { failures[build.Image.ID] = build.Err } } if len(failures) > 0 { b.log.Error("the following builds failed:") for id, err := range failures { b.log.Error("* "+id, "err", err) } return fmt.Errorf("%d builds failed", len(failures)) } return nil }
Build builds a list of images, ensuring that each image is built after any dependent images have been built
Build
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func (b *Builder) BuildImage(image *Image) error { var layers []*ct.ImageLayer for _, l := range image.Layers { name := l.Name if name == "" { name = image.ID } env := make(map[string]string, len(image.Env)+len(l.Env)) for k, v := range image.Env { env[k] = v } for k, v := range l.Env { env[k] = v } run := make([]string, len(l.Run)) for i, cmd := range l.Run { run[i] = cmd } var inputs []string // add the script as an input and run with 'bash -e' if l.Script != "" { inputs = append(inputs, l.Script) run = append(run, "bash -e "+l.Script) } // add the explicit inputs, expanding globs for _, input := range l.Inputs { paths, err := filepath.Glob(input) if err != nil { return err } inputs = append(inputs, paths...) } if len(l.GoBin) > 0 { for _, bin := range l.GoBin { run = append(run, "GOBIN=/bin GOBIN_CACHE=/tmp/gobin gobin -m "+bin) } } // if building protocols, add .proto inputs and build using // 'protoc' into /mnt/out/proto if len(l.ProtoBuild) > 0 { // add the build commands in a predictable order so // the generated layer ID is deterministic dirs := make([]string, 0, len(l.ProtoBuild)) for _, dir := range l.ProtoBuild { dirs = append(dirs, dir) } sort.Strings(dirs) for _, dir := range dirs { paths, err := filepath.Glob(filepath.Join(dir, "*.proto")) if err != nil { return err } inputs = append(inputs, paths...) outDir := filepath.Join("/mnt/out/proto", dir) run = append(run, fmt.Sprintf("mkdir -p %s", outDir), fmt.Sprintf("protoc -I /usr/local/include -I %s --go_out=plugins=grpc:%s %s", dir, outDir, strings.Join(paths, " ")), ) } } // if building Go binaries, load Go inputs for the configured // GOOS / GOARCH and build with 'go build' / 'cgo build' if len(l.GoBuild) > 0 || len(l.CGoBuild) > 0 { goInputs := b.GoInputsFor(GoPlatform{OS: env["GOOS"], Arch: env["GOARCH"]}) // add the commands in a predictable order so the // generated layer ID is deterministic dirs := make([]string, 0, len(l.GoBuild)) for dir := range l.GoBuild { dirs = append(dirs, dir) } sort.Strings(dirs) for _, dir := range dirs { i, err := goInputs.Load(dir) if err != nil { return err } inputs = append(inputs, i...) run = append(run, fmt.Sprintf("go build -o %s %s", l.GoBuild[dir], "./"+dir)) } dirs = make([]string, 0, len(l.CGoBuild)) for dir := range l.CGoBuild { dirs = append(dirs, dir) } sort.Strings(dirs) for _, dir := range dirs { i, err := goInputs.Load(dir) if err != nil { return err } inputs = append(inputs, i...) 
run = append(run, fmt.Sprintf("cgo build -o %s %s", l.CGoBuild[dir], "./"+dir)) } } // copy the l.Copy inputs in a predictable order so the // generated layer ID is deterministic copyPaths := make([]string, 0, len(l.Copy)) for path := range l.Copy { copyPaths = append(copyPaths, path) } sort.Strings(copyPaths) for _, path := range copyPaths { inputs = append(inputs, path) dst := l.Copy[path] run = append(run, fmt.Sprintf("mkdir -p %q && cp %q %q", filepath.Dir(dst), path, dst)) } // run the build job with either l.BuildWith or image.Base var artifact *ct.Artifact var err error if l.BuildWith != "" { artifact, err = b.Artifact(l.BuildWith) } else if image.Base != "" { artifact, err = b.Artifact(image.Base) } if err != nil { return err } // interpolate the environment variables for k, v := range env { tmpl, err := template.New("env").Parse(v) if err != nil { return fmt.Errorf("error parsing env template %q: %s", v, err) } var buf bytes.Buffer if err := tmpl.Execute(&buf, b.envTemplateData); err != nil { return fmt.Errorf("error parsing env template %q: %s", v, err) } env[k] = buf.String() } // generate the layer ID from the layer config, artifact and // list of inputs id, err := b.generateLayerID(name, run, env, artifact, inputs...) if err != nil { return err } start := time.Now() l, err := b.layerGroup.Do(id, func() (interface{}, error) { return b.BuildLayer(l, id, name, run, env, artifact, inputs) }) if err != nil { return err } layer := l.(*ct.ImageLayer) b.log.Debug(fmt.Sprintf("%s layer done", name), "layer.id", id, "duration", time.Since(start)) layers = append(layers, layer) } // generate an artifact based on image.Base and add to b.artifacts var baseLayers []*ct.ImageLayer if image.Base != "" { baseArtifact, err := b.Artifact(image.Base) if err != nil { return err } for _, rootfs := range baseArtifact.Manifest().Rootfs { baseLayers = append(baseLayers, rootfs.Layers...) } } manifest := ct.ImageManifest{ Type: ct.ImageManifestTypeV1, Rootfs: []*ct.ImageRootfs{{ Platform: ct.DefaultImagePlatform, Layers: append(baseLayers, layers...), }}, } if image.Entrypoint != nil { manifest.Entrypoints = map[string]*ct.ImageEntrypoint{ "_default": image.Entrypoint, } } imageURL := fmt.Sprintf("%s?name=%s&target=/images/%s.json", b.tufConfig.Repository, image.ID, manifest.ID()) artifact := &ct.Artifact{ Type: ct.ArtifactTypeFlynn, URI: imageURL, RawManifest: manifest.RawManifest(), Hashes: manifest.Hashes(), Size: int64(len(manifest.RawManifest())), LayerURLTemplate: layerURLTemplate, Meta: map[string]string{ "manifest.id": manifest.ID(), "flynn.component": image.ID, "flynn.system-image": "true", }, } b.artifactsMtx.Lock() b.artifacts[image.ID] = artifact b.artifactsMtx.Unlock() // write the artifact to build/image/ID.json path := filepath.Join("build", "image", image.ID+".json") if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return err } f, err := os.Create(path) if err != nil { return err } defer f.Close() return json.NewEncoder(f).Encode(artifact) }
BuildImage builds the image's layers and adds the resulting artifact to b.artifacts
BuildImage
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func (b *Builder) WriteManifests(manifests map[string]string, tufRepository string) error { for src, name := range manifests { dst := filepath.Join("build", "manifests", name) b.log.Debug("writing manifest", "src", src, "dst", dst) manifest, err := ioutil.ReadFile(src) if err != nil { return err } var replaceErr error manifest = imageArtifactPattern.ReplaceAllFunc(manifest, func(raw []byte) []byte { name := string(raw[16 : len(raw)-1]) artifact, ok := b.artifacts[name] if !ok { replaceErr = fmt.Errorf("unknown image %q", name) return nil } artifact.Meta = map[string]string{ "flynn.component": name, "flynn.system-image": "true", } data, err := json.Marshal(artifact) if err != nil { replaceErr = err return nil } return data }) if replaceErr != nil { return replaceErr } if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { return err } if err := ioutil.WriteFile(dst, manifest, 0644); err != nil { return err } } return nil }
WriteManifests interpolates a set of manifests and writes them to the build/manifests directory
WriteManifests
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func (b *Builder) WriteImages() error { path := "build/images.json" tmp, err := os.Create(path + ".tmp") if err != nil { return err } defer tmp.Close() if err := json.NewEncoder(tmp).Encode(b.artifacts); err != nil { os.Remove(tmp.Name()) return err } return os.Rename(tmp.Name(), path) }
WriteImages writes the built images to build/images.json
WriteImages
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func (b *Builder) GetCachedLayer(name, id string) (*ct.ImageLayer, error) { // first check the local cache f, err := os.Open(b.layerConfigPath(id)) if err == nil { defer f.Close() layer := &ct.ImageLayer{} return layer, json.NewDecoder(f).Decode(layer) } else if !os.IsNotExist(err) { return nil, err } // not found locally, check the TUF repo data, err := tufutil.DownloadString(b.tufClient, fmt.Sprintf("/layers/%s.json", id)) if _, ok := err.(tuf.ErrUnknownTarget); ok { // cache miss, return a nil layer so it gets generated return nil, nil } else if err != nil { return nil, fmt.Errorf("error getting layer from the TUF repo: %s", err) } layer := &ct.ImageLayer{} if err := json.Unmarshal([]byte(data), layer); err != nil { return nil, fmt.Errorf("error getting layer from the TUF repo: %s", err) } // cache the layer locally b.log.Info("fetching layer", "layer.name", name, "layer.id", id, "layer.size", units.BytesSize(float64(layer.Length))) tmp, err := tufutil.Download(b.tufClient, fmt.Sprintf("/layers/%s.squashfs", id)) if err != nil { return nil, fmt.Errorf("error getting layer from the TUF repo: %s", err) } defer tmp.Close() f, err = os.Create(b.layerPath(id)) if err != nil { return nil, err } defer f.Close() if _, err := io.Copy(f, tmp); err != nil { return nil, fmt.Errorf("error writing layer to cache: %s", err) } if err := ioutil.WriteFile(b.layerConfigPath(id), []byte(data), 0644); err != nil { return nil, fmt.Errorf("error writing layer to cache: %s", err) } return layer, nil }
GetCachedLayer gets a layer either from the local /var/lib/flynn/layer-cache directory or from the TUF repository, returning a nil layer for a cache miss
GetCachedLayer
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func (b *Builder) BuildLayer(l *Layer, id, name string, run []string, env map[string]string, artifact *ct.Artifact, inputs []string) (*ct.ImageLayer, error) { // try and get the cached layer first layer, err := b.GetCachedLayer(name, id) if err != nil { return nil, err } else if layer != nil { return layer, nil } // create a shared directory containing the inputs so we can ensure the // job only accesses declared inputs (thus enforcing the correctness of // the generated layer ID) dir, err := ioutil.TempDir("", "flynn-build-mnt") if err != nil { return nil, err } defer os.RemoveAll(dir) if err := os.Chmod(dir, 0755); err != nil { return nil, err } for _, subdir := range []string{"bin", "out", "src"} { if err := os.MkdirAll(filepath.Join(dir, subdir), 0755); err != nil { return nil, err } } copyFile := func(srcPath, dstPath string) error { src, err := os.Open(srcPath) if err != nil { return err } defer src.Close() stat, err := src.Stat() if err != nil { return err } path := filepath.Join(dir, dstPath) if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return err } dst, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, stat.Mode()) if err != nil { return err } defer dst.Close() _, err = io.Copy(dst, src) return err } for _, input := range inputs { if err := copyFile(input, filepath.Join("src", input)); err != nil { b.log.Error("error copying input", "input", input, "err", err) return nil, err } } // copy the flynn-builder binary into the shared directory so we can // run it inside the job if err := copyFile(os.Args[0], "bin/flynn-builder"); err != nil { b.log.Error("error copying flynn-builder binary", "err", err) return nil, err } job := &host.Job{ Config: host.ContainerConfig{ Env: env, DisableLog: true, }, Resources: resource.Defaults(), Metadata: map[string]string{ "flynn-controller.app_name": "builder", "flynn-controller.type": name, }, } cmd := exec.Cmd{Job: job} // run bash inside the job, passing the commands via stdin job.Config.Args = []string{"/mnt/bin/flynn-builder", "run", "bash", "-exs"} job.Config.Stdin = true cmd.Stdin = strings.NewReader(strings.Join(run, "\n")) // set FLYNN_VERSION which will be assigned to the pkg/version.version // constant using ldflags when building Go binaries. // // This is not treated as an input because we only want to build a new // binary with the given version if the build inputs have changed. job.Config.Env["FLYNN_VERSION"] = b.version // run the job in the host network to avoid a kernel bug which causes // subsequent jobs to block waiting on the lo network device to become // free (see https://github.com/docker/docker/issues/5618). // // NOTE: this leads to an impure build, jobs sometimes use the state of // the network to change the installation procedure (e.g. PostgreSQL // changes the default port to 5433 if something is already listening // on port 5432 at install time) job.Config.HostNetwork = true linuxCapabilities := append(host.DefaultCapabilities, l.LinuxCapabilities...) 
job.Config.LinuxCapabilities = &linuxCapabilities if l.Limits == nil { l.Limits = make(map[string]string) } if l.Limits["temp_disk"] == "" { l.Limits["temp_disk"] = "1G" } for typ, v := range l.Limits { limit, err := resource.ParseLimit(resource.Type(typ), v) if err != nil { return nil, fmt.Errorf("error parsing limit %q = %q: %s", typ, v, err) } job.Resources.SetLimit(resource.Type(typ), limit) } // mount the shared directory at /mnt as a 9p filesystem ln, err := net.Listen("tcp", os.Getenv("EXTERNAL_IP")+":0") if err != nil { return nil, err } defer ln.Close() go serveFilesystem(dir, ln) addr := ln.Addr().(*net.TCPAddr) job.Config.Mounts = append(job.Config.Mounts, host.Mount{ Device: "9p", Location: "/mnt", Target: addr.IP.String(), Data: fmt.Sprintf("trans=tcp,port=%d", addr.Port), }) job.Config.WorkingDir = "/mnt/src" if artifact == nil { // use the base layer if there is no artifact to build with job.Mountspecs = []*host.Mountspec{b.baseLayer} } else { utils.SetupMountspecs(job, []*ct.Artifact{artifact}) } // copy output to log file + prefix stdout / stderr with the layer name logPath := filepath.Join("build/log", name+".log") if err := os.MkdirAll(filepath.Dir(logPath), 0755); err != nil { return nil, err } logFile, err := os.Create(logPath) if err != nil { return nil, err } logR, logW := io.Pipe() stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } go io.Copy(logW, stdout) stderr, err := cmd.StderrPipe() if err != nil { return nil, err } go io.Copy(logW, stderr) go func() { defer logFile.Close() s := bufio.NewScanner(logR) for s.Scan() { fmt.Fprintf(os.Stderr, "%s: %s: %s\n", time.Now().Format("15:04:05.999"), name, s.Text()) fmt.Fprintln(logFile, s.Text()) } }() // run the job b.log.Info("building layer", "layer.name", name, "layer.id", id) if err := cmd.Run(); err != nil { b.log.Error("error running the build job", "name", name, "err", err) return nil, err } // extract any generated protocol files back into the source directory // so they can be committed into git protoDir := filepath.Join(dir, "out", "proto") if _, err := os.Stat(protoDir); err == nil { if err := filepath.Walk(protoDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return nil } src, err := os.Open(path) if err != nil { return err } defer src.Close() dstPath, err := filepath.Rel(protoDir, path) if err != nil { return err } dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, info.Mode()) if err != nil { return err } defer dst.Close() _, err = io.CopyN(dst, src, info.Size()) return err }); err != nil { return nil, fmt.Errorf("error extracting generated protocol files: %s", err) } } // copy the layer to the cache f, err := os.Open(filepath.Join(dir, "out", "layer.squashfs")) if err != nil { return nil, fmt.Errorf("error opening SquashFS layer: %s", err) } defer f.Close() stat, err := f.Stat() if err != nil { return nil, fmt.Errorf("error opening SquashFS layer: %s", err) } h := sha512.New512_256() dst, err := os.Create(b.layerPath(id)) if err != nil { return nil, fmt.Errorf("error writing to layer cache: %s", err) } defer dst.Close() if _, err := io.Copy(dst, io.TeeReader(f, h)); err != nil { return nil, fmt.Errorf("error writing to layer cache: %s", err) } layer = &ct.ImageLayer{ ID: id, Type: ct.ImageLayerTypeSquashfs, Length: stat.Size(), Hashes: map[string]string{ "sha512_256": hex.EncodeToString(h.Sum(nil)), }, } data, err := json.Marshal(layer) if err != nil { return nil, fmt.Errorf("error encoding layer config: %s", err) } 
if err := ioutil.WriteFile(b.layerConfigPath(id), data, 0644); err != nil { return nil, fmt.Errorf("error writing to layer cache: %s", err) } return layer, nil }
BuildLayer either returns a cached layer or runs a job to build the layer
BuildLayer
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func (b *Builder) generateLayerID(name string, run []string, env map[string]string, artifact *ct.Artifact, inputs ...string) (id string, err error) { start := time.Now() defer func() { b.log.Debug("generated layer ID", "name", name, "id", id, "duration", time.Since(start)) }() var layer = struct { Name string `json:"name"` Run []string `json:"run,omitempty"` Env map[string]string `json:"env,omitempty"` RawManifest json.RawMessage `json:"manifest,omitempty"` Files []*fileInput `json:"files,omitempty"` }{ Name: name, Run: run, Env: env, Files: make([]*fileInput, 0, len(inputs)), } if artifact != nil { layer.RawManifest = artifact.RawManifest } addFile := func(path string) error { b.fileCacheMtx.Lock() cached, ok := b.fileCache[path] b.fileCacheMtx.Unlock() if ok { layer.Files = append(layer.Files, cached) return nil } f, err := os.Open(path) if err != nil { return err } defer f.Close() stat, err := f.Stat() if err != nil { return err } h := sha512.New512_256() if _, err := io.Copy(h, f); err != nil { return err } fi := &fileInput{ Path: path, Size: stat.Size(), SHA: hex.EncodeToString(h.Sum(nil)), } b.fileCacheMtx.Lock() b.fileCache[path] = fi b.fileCacheMtx.Unlock() layer.Files = append(layer.Files, fi) return nil } for _, input := range inputs { if err := addFile(input); err != nil { return "", err } } data, err := cjson.Marshal(layer) if err != nil { return "", err } sum := sha512.Sum512_256(data) return hex.EncodeToString(sum[:]), nil }
generateLayerID generates a layer ID from a set of all inputs required to build the layer, which prevents rebuilding a layer if the inputs haven't changed. It does this by constructing a canonicalised JSON object representing the inputs and computing the SHA512/256 sum of the resulting bytes. TODO: consider storing a map of filenames to hashes and cache based on the last modified time to avoid unnecessary work.
generateLayerID
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
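generateLayerID hashes a canonical JSON encoding of the layer inputs with SHA512/256 so that identical inputs always produce the same layer ID. A minimal sketch of the idea, using the standard encoding/json package instead of Flynn's canonical-JSON encoder (deterministic here because the struct layout is fixed and Go sorts map keys when marshalling):

package main

import (
    "crypto/sha512"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "log"
)

type layerInput struct {
    Name string            `json:"name"`
    Run  []string          `json:"run,omitempty"`
    Env  map[string]string `json:"env,omitempty"`
}

// layerID derives a deterministic ID by hashing the JSON encoding of the
// build inputs with SHA512/256.
func layerID(in layerInput) (string, error) {
    data, err := json.Marshal(in)
    if err != nil {
        return "", err
    }
    sum := sha512.Sum512_256(data)
    return hex.EncodeToString(sum[:]), nil
}

func main() {
    id, err := layerID(layerInput{
        Name: "host",
        Run:  []string{"go build -o /bin/flynn-host ./host"},
        Env:  map[string]string{"GOOS": "linux"},
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(id) // unchanged inputs always yield the same ID
}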
func NewProgressBar(count int, tty bool) (*pb.ProgressBar, error) { bar := pb.New(count) if !tty { bar.Output = os.Stderr return bar, nil } // replace os.Stdout / os.Stderr with a pipe and copy output to a // channel so that the progress bar can be wiped before printing output type stdOutput struct { Out io.Writer Text string } output := make(chan *stdOutput) wrap := func(out io.Writer) (*os.File, error) { r, w, err := os.Pipe() if err != nil { return nil, err } go func() { s := bufio.NewScanner(r) for s.Scan() { output <- &stdOutput{out, s.Text()} } }() return w, nil } stdout := os.Stdout var err error os.Stdout, err = wrap(stdout) if err != nil { return nil, err } stderr := os.Stderr os.Stderr, err = wrap(stderr) if err != nil { return nil, err } progress := make(chan string) bar.Callback = func(out string) { progress <- out } go func() { var barText string for { select { case out := <-output: // if we have printed the bar, replace it with // spaces then write the output on the same line if len(barText) > 0 { spaces := make([]byte, len(barText)) for i := 0; i < len(barText); i++ { spaces[i] = ' ' } fmt.Fprint(stderr, "\r", string(spaces), "\r") } fmt.Fprintln(out.Out, out.Text) // re-print the bar on the next line if len(barText) > 0 { fmt.Fprint(stderr, "\r"+barText) } case out := <-progress: // print the bar over the previous bar barText = out fmt.Fprint(stderr, "\r"+out) } } }() return bar, nil }
NewProgressBar creates a progress bar which is pinned to the bottom of the terminal screen
NewProgressBar
go
flynn/flynn
builder/build.go
https://github.com/flynn/flynn/blob/master/builder/build.go
BSD-3-Clause
func determineVersion() (string, error) { out, err := exec.Command("build/bin/flynn-host", "version").CombinedOutput() if err != nil { return "", fmt.Errorf("error getting flynn-host version: %s: %s", err, out) } version := string(bytes.TrimSpace(out)) parts := strings.SplitN(version, "-", 2) return parts[0], nil }
determineVersion determines the version by running 'flynn-host version', potentially stripping off the '-<commit>' suffix
determineVersion
go
flynn/flynn
builder/export.go
https://github.com/flynn/flynn/blob/master/builder/export.go
BSD-3-Clause
func Test(t *testing.T) { TestingT(t) }
Hook gocheck up to the "go test" runner
Test
go
flynn/flynn
logaggregator/main_test.go
https://github.com/flynn/flynn/blob/master/logaggregator/main_test.go
BSD-3-Clause
func zip(msgSlices ...[]*rfc5424.Message) []*rfc5424.Message { n, m := len(msgSlices[0]), len(msgSlices) data := make([]*rfc5424.Message, 0, n*m) for i := 0; i < n; i++ { for j := range msgSlices { data = append(data, msgSlices[j][i]) } } return data }
zip returns a slice of interlaced input data; the input slices must be the same length.
zip
go
flynn/flynn
logaggregator/iterator_test.go
https://github.com/flynn/flynn/blob/master/logaggregator/iterator_test.go
BSD-3-Clause
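The zip helper above interlaces equal-length slices in round-robin order. A small standalone sketch of the same idea over strings:

package main

import "fmt"

// interlace returns the elements of the input slices in round-robin order;
// the input slices must be the same length.
func interlace(slices ...[]string) []string {
    n, m := len(slices[0]), len(slices)
    out := make([]string, 0, n*m)
    for i := 0; i < n; i++ {
        for j := range slices {
            out = append(out, slices[j][i])
        }
    }
    return out
}

func main() {
    a := []string{"a1", "a2", "a3"}
    b := []string{"b1", "b2", "b3"}
    fmt.Println(interlace(a, b)) // [a1 b1 a2 b2 a3 b3]
}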
func NewAggregator() *Aggregator { a := &Aggregator{ buffers: make(map[string]*buffer.Buffer), msgc: make(chan *rfc5424.Message, 1000), pausec: make(chan struct{}), } go a.run() return a }
NewAggregator creates a new running Aggregator.
NewAggregator
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func (a *Aggregator) Feed(msg *rfc5424.Message) { a.msgc <- msg }
Feed inserts a message in the aggregator.
Feed
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func (a *Aggregator) Read(id string) []*rfc5424.Message { return a.getBuffer(id).Read() }
Read returns the buffered messages for id.
Read
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func (a *Aggregator) ReadAll() [][]*rfc5424.Message { // TODO(benburkert): restructure Aggregator & ring.Buffer to avoid nested locks a.bmu.Lock() defer a.bmu.Unlock() buffers := make([][]*rfc5424.Message, 0, len(a.buffers)) for _, buf := range a.buffers { buffers = append(buffers, buf.Read()) } return buffers }
ReadAll returns all buffered messages.
ReadAll
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func (a *Aggregator) ReadAndSubscribe(id string, msgc chan<- *rfc5424.Message, donec <-chan struct{}) []*rfc5424.Message { return a.getBuffer(id).ReadAndSubscribe(msgc, donec) }
ReadAndSubscribe returns the buffered messages and adds a subscriber channel for id.
ReadAndSubscribe
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func (a *Aggregator) Reset() { a.bmu.Lock() defer a.bmu.Unlock() for k, buf := range a.buffers { buf.Close() delete(a.buffers, k) } }
Reset clears all buffered data and closes subscribers.
Reset
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func (a *Aggregator) Shutdown() { a.Reset() close(a.msgc) }
Shutdown stops the Aggregator, resets the buffers, and closes buffer subscribers.
Shutdown
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func (a *Aggregator) Subscribe(id string, msgc chan<- *rfc5424.Message, donec <-chan struct{}) { a.getBuffer(id).Subscribe(msgc, donec) }
Subscribe adds a subscriber channel for id.
Subscribe
go
flynn/flynn
logaggregator/aggregator.go
https://github.com/flynn/flynn/blob/master/logaggregator/aggregator.go
BSD-3-Clause
func NewBuffer() *Buffer { return newBuffer(DefaultCapacity) }
NewBuffer returns an empty allocated Buffer with DefaultCapacity.
NewBuffer
go
flynn/flynn
logaggregator/buffer/buffer.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer.go
BSD-3-Clause
func (b *Buffer) Add(m *rfc5424.Message) error { b.mu.Lock() defer b.mu.Unlock() if b.length == -1 { return errors.New("buffer closed") } if b.head == nil { b.head = &message{Message: *m} b.tail = b.head } else { // iterate from newest to oldest through messages to find position // to insert new message for other := b.tail; other != nil; other = other.prev { if m.Timestamp.Equal(other.Timestamp) && bytes.Equal(m.StructuredData, other.StructuredData) { // duplicate log line return nil } if m.Timestamp.Before(other.Timestamp) { if other.prev == nil { // insert before other at head other.prev = &message{Message: *m, next: other} b.head = other.prev break } else { continue } } msg := &message{Message: *m, prev: other} if other.next != nil { // insert between other and other.next other.next.prev = msg msg.next = other.next } else { // insert at tail b.tail = msg } other.next = msg break } } if b.length < b.capacity { // buffer not yet full b.length++ } else { // at capacity, remove head b.head = b.head.next b.head.prev = nil } for msgc := range b.subs { select { case msgc <- m: default: // chan is full, drop this message to it } } return nil }
Add adds an element to the Buffer. If the Buffer is already full, it removes an existing message.
Add
go
flynn/flynn
logaggregator/buffer/buffer.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer.go
BSD-3-Clause
func (b *Buffer) Read() []*rfc5424.Message { b.mu.RLock() defer b.mu.RUnlock() return b.read() }
Read returns a copied slice with the contents of the Buffer. It does not modify the underlying buffer in any way. You are free to modify the returned slice without affecting Buffer, though modifying the individual elements in the result will also modify those elements in the Buffer.
Read
go
flynn/flynn
logaggregator/buffer/buffer.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer.go
BSD-3-Clause
func (b *Buffer) ReadAndSubscribe(msgc chan<- *rfc5424.Message, donec <-chan struct{}) []*rfc5424.Message { b.mu.RLock() defer b.mu.RUnlock() b.subscribe(msgc, donec) return b.read() }
ReadAndSubscribe returns all buffered messages just like Read, and also subscribes the given channel to receive new messages as they arrive.
ReadAndSubscribe
go
flynn/flynn
logaggregator/buffer/buffer.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer.go
BSD-3-Clause
func (b *Buffer) Subscribe(msgc chan<- *rfc5424.Message, donec <-chan struct{}) { b.mu.RLock() defer b.mu.RUnlock() b.subscribe(msgc, donec) }
Subscribe registers the given channel to receive all future messages added to the Buffer. Sends to the channel are non-blocking, so new messages are dropped while the channel is full. The caller closes the donec channel to stop receiving messages.
Subscribe
go
flynn/flynn
logaggregator/buffer/buffer.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer.go
BSD-3-Clause
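Buffer.Subscribe above registers a caller-supplied channel, delivers messages with non-blocking sends (dropping when the channel is full), and unsubscribes when the caller closes donec. A minimal sketch of that publish/subscribe pattern, with illustrative names (broker, publish) rather than Flynn's types:

package main

import (
    "fmt"
    "sync"
)

// broker fans messages out to subscriber channels. Sends are non-blocking,
// so a slow subscriber drops messages instead of stalling the producer.
type broker struct {
    mu   sync.Mutex
    subs map[chan<- string]struct{}
}

func newBroker() *broker { return &broker{subs: make(map[chan<- string]struct{})} }

func (b *broker) subscribe(msgc chan<- string, donec <-chan struct{}) {
    b.mu.Lock()
    b.subs[msgc] = struct{}{}
    b.mu.Unlock()
    go func() {
        <-donec
        b.mu.Lock()
        delete(b.subs, msgc)
        close(msgc)
        b.mu.Unlock()
    }()
}

func (b *broker) publish(msg string) {
    b.mu.Lock()
    defer b.mu.Unlock()
    for ch := range b.subs {
        select {
        case ch <- msg:
        default: // subscriber is full, drop this message
        }
    }
}

func main() {
    b := newBroker()
    msgc := make(chan string, 10)
    donec := make(chan struct{})
    b.subscribe(msgc, donec)
    b.publish("hello")
    fmt.Println(<-msgc) // hello
    close(donec)        // unsubscribe; the broker closes msgc
}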
func (b *Buffer) read() []*rfc5424.Message { if b.length == -1 { return nil } buf := make([]*rfc5424.Message, 0, b.length) msg := b.head for msg != nil { buf = append(buf, &msg.Message) msg = msg.next } return buf }
read expects b.mu to already be locked.
read
go
flynn/flynn
logaggregator/buffer/buffer.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer.go
BSD-3-Clause
func (b *Buffer) subscribe(msgc chan<- *rfc5424.Message, donec <-chan struct{}) { b.subs[msgc] = struct{}{} go func() { select { case <-donec: case <-b.donec: } b.mu.Lock() defer b.mu.Unlock() delete(b.subs, msgc) close(msgc) }() }
subscribe assumes b.mu is already locked.
subscribe
go
flynn/flynn
logaggregator/buffer/buffer.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer.go
BSD-3-Clause
func Test(t *testing.T) { TestingT(t) }
Hook gocheck up to the "go test" runner
Test
go
flynn/flynn
logaggregator/buffer/buffer_test.go
https://github.com/flynn/flynn/blob/master/logaggregator/buffer/buffer_test.go
BSD-3-Clause
func WriteTo(buffers [][]*rfc5424.Message, w io.Writer) error { enc := gob.NewEncoder(w) return writeTo(buffers, enc) }
WriteTo writes a snapshot of the buffers to the writer. The partitioning of messages is not retained. The writer is left open.
WriteTo
go
flynn/flynn
logaggregator/snapshot/snapshot.go
https://github.com/flynn/flynn/blob/master/logaggregator/snapshot/snapshot.go
BSD-3-Clause
func StreamTo(buffers [][]*rfc5424.Message, msgc <-chan *rfc5424.Message, w io.Writer) error { enc := gob.NewEncoder(w) if err := writeTo(buffers, enc); err != nil { return err } for msg := range msgc { if err := enc.Encode(msg); err != nil { return err } } return nil }
StreamTo writes a snapshot of the buffers to the writer, then writes messages from the channel to the writer. The writer is left open.
StreamTo
go
flynn/flynn
logaggregator/snapshot/snapshot.go
https://github.com/flynn/flynn/blob/master/logaggregator/snapshot/snapshot.go
BSD-3-Clause
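WriteTo and StreamTo gob-encode messages onto a writer, and the snapshot Scanner decodes them back until EOF. A small self-contained sketch of that encode/decode round trip, using an illustrative message type in place of rfc5424.Message:

package main

import (
    "bytes"
    "encoding/gob"
    "fmt"
    "io"
    "log"
)

type message struct {
    Timestamp int64
    Msg       []byte
}

func main() {
    var buf bytes.Buffer

    // write a stream of messages, as WriteTo/StreamTo do
    enc := gob.NewEncoder(&buf)
    for _, m := range []message{{1, []byte("one")}, {2, []byte("two")}} {
        if err := enc.Encode(m); err != nil {
            log.Fatal(err)
        }
    }

    // read them back until EOF, as the snapshot Scanner does
    dec := gob.NewDecoder(&buf)
    for {
        var m message
        if err := dec.Decode(&m); err == io.EOF {
            break
        } else if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%d %s\n", m.Timestamp, m.Msg)
    }
}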
func NewScanner(r io.Reader) *Scanner { return &Scanner{dec: gob.NewDecoder(r)} }
NewScanner returns a new Scanner reading from r.
NewScanner
go
flynn/flynn
logaggregator/snapshot/snapshot.go
https://github.com/flynn/flynn/blob/master/logaggregator/snapshot/snapshot.go
BSD-3-Clause
func Test(t *testing.T) { TestingT(t) }
Hook gocheck up to the "go test" runner
Test
go
flynn/flynn
logaggregator/snapshot/snapshot_test.go
https://github.com/flynn/flynn/blob/master/logaggregator/snapshot/snapshot_test.go
BSD-3-Clause
func newClient(url string, http *http.Client) *Client { return &Client{ Client: &httpclient.Client{ ErrNotFound: ErrNotFound, URL: url, HTTP: http, }, } }
newClient creates a generic Client object; additional attributes must be set by the caller.
newClient
go
flynn/flynn
logaggregator/client/client.go
https://github.com/flynn/flynn/blob/master/logaggregator/client/client.go
BSD-3-Clause
func New(uri string) (*Client, error) { return NewWithHTTP(uri, httphelper.RetryClient) }
New creates a new Client pointing at uri.
New
go
flynn/flynn
logaggregator/client/client.go
https://github.com/flynn/flynn/blob/master/logaggregator/client/client.go
BSD-3-Clause
func NewWithHTTP(uri string, httpClient *http.Client) (*Client, error) { if uri == "" { uri = "http://logaggregator.discoverd" } u, err := url.Parse(uri) if err != nil { return nil, err } return newClient(u.String(), httpClient), nil }
NewWithHTTP creates a new Client pointing at uri with the specified HTTP client.
NewWithHTTP
go
flynn/flynn
logaggregator/client/client.go
https://github.com/flynn/flynn/blob/master/logaggregator/client/client.go
BSD-3-Clause
func (c *Client) GetLog(channelID string, opts *logagg.LogOpts) (io.ReadCloser, error) { path := fmt.Sprintf("/log/%s", channelID) if opts != nil { if encodedQuery := opts.EncodedQuery(); encodedQuery != "" { path = fmt.Sprintf("%s?%s", path, encodedQuery) } } res, err := c.RawReq("GET", path, nil, nil, nil) if err != nil { return nil, err } return res.Body, nil }
GetLog returns a ReadCloser log stream of the log channel with ID channelID. Each line returned will be a JSON serialized Message. If lines is above zero, the number of lines returned will be capped at that value. Otherwise, all available logs are returned. If follow is true, new log lines are streamed after the buffered log.
GetLog
go
flynn/flynn
logaggregator/client/client.go
https://github.com/flynn/flynn/blob/master/logaggregator/client/client.go
BSD-3-Clause
func main() { os.Exit(mage.Main()) }
This file allows someone to run mage commands without mage installed by running `go run mage.go TARGET`. See https://magefile.org/zeroinstall/
main
go
aquasecurity/trivy-operator
mage.go
https://github.com/aquasecurity/trivy-operator/blob/master/mage.go
Apache-2.0
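The mage.go record above is the zero-install wrapper: the whole file is typically just this main function guarded by an ignore build constraint, so it stays out of normal package builds while `go run mage.go TARGET` still compiles it explicitly. A sketch of the full file, following the magefile.org/zeroinstall recipe:

//go:build ignore
// +build ignore

package main

import (
    "os"

    "github.com/magefile/mage/mage"
)

func main() { os.Exit(mage.Main()) }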
func getWorkingDir() string { wd, err := os.Getwd() if err != nil { fmt.Println("Error getting the current working directory:", err) os.Exit(1) } return wd }
Function to get the current working directory using os.Getwd()
getWorkingDir
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (b Build) Binary() error { fmt.Println("Building trivy-operator binary...") return sh.RunWithV(LINUX_ENV, "go", "build", "-o", "./bin/trivy-operator", "./cmd/trivy-operator/main.go") }
Target for building trivy-operator binary.
Binary
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func getGinkgo() error { fmt.Println("Installing Ginkgo CLI...") return sh.RunWithV(ENV, "go", "install", "github.com/onsi/ginkgo/v2/ginkgo") }
Target for installing Ginkgo CLI.
getGinkgo
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func getQTC() error { fmt.Println("Installing quicktemplate compiler...") return sh.RunWithV(ENV, "go", "install", "github.com/valyala/quicktemplate/qtc") }
Target for installing quicktemplate compiler.
getQTC
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func compileTemplates() error { fmt.Println("Converting quicktemplate files to Go code...") return sh.RunWithV(ENV, filepath.Join(GOBIN, "qtc")) }
Target for converting quicktemplate files (*.qtpl) into Go code.
compileTemplates
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (t Test) Unit() error { fmt.Println("Running tests...") return sh.RunWithV(ENV, "go", "test", "-v", "-short", "-timeout", "60s", "-coverprofile=coverage.txt", "./...") }
Target for running unit tests.
Unit
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (t Test) Integration() error { fmt.Println("Preparing integration tests for Trivy Operator...") mg.Deps(checkEnvKubeconfig, checkEnvOperatorNamespace, checkEnvOperatorTargetNamespace, getGinkgo) mg.Deps(prepareImages) fmt.Println("Running integration tests for Trivy Operator...") return sh.RunV(GINKGO, "-v", "-coverprofile=coverage.txt", "-coverpkg=github.com/aquasecurity/trivy-operator/pkg/operator,"+ "github.com/aquasecurity/trivy-operator/pkg/operator/predicate,"+ "github.com/aquasecurity/trivy-operator/pkg/operator/controller,"+ "github.com/aquasecurity/trivy-operator/pkg/plugin,"+ "github.com/aquasecurity/trivy-operator/pkg/plugin/trivy,"+ "github.com/aquasecurity/trivy-operator/pkg/configauditreport,"+ "github.com/aquasecurity/trivy-operator/pkg/vulnerabilityreport", "./tests/itest/trivy-operator") }
Target for running integration tests for Trivy Operator.
Integration
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func prepareImages() error { images := []string{ "mirror.gcr.io/knqyf263/vuln-image:1.2.3", "wordpress:4.9", "wordpress:6.7", } fmt.Printf("Preparing %d image(s) for Trivy Operator...\n", len(images)) for _, image := range images { fmt.Printf("Preparing image %q for Trivy Operator...\n", image) err := sh.Run("docker", "pull", image) if err != nil { return fmt.Errorf("couldn't pull image %q: %v", image, err) } err = sh.Run("kind", "load", "docker-image", image) if err != nil { return fmt.Errorf("couldn't load image %q: %v", image, err) } } return nil }
Target for downloading test images and uploading them into KinD.
prepareImages
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func checkEnvironmentVariable(name string) error { envVar := os.Getenv(name) if envVar == "" { return fmt.Errorf("Environment variable %q is not set", name) } fmt.Println(name, "=", envVar) return nil }
Targets for checking if environment variables are set.
checkEnvironmentVariable
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (t Tool) Clean() { fmt.Println("Removing build artifacts...") removeDir(filepath.Join(".", "bin")) removeDir(filepath.Join(".", "dist")) }
Target for removing build artifacts
Clean
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (b Build) DockerAll() { fmt.Println("Building Docker images for all binaries...") b.Docker() b.DockerUbi8() }
Target for building Docker images for all binaries
DockerAll
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (b Build) Docker() error { fmt.Println("Building Docker image for trivy-operator...") return sh.RunV("docker", "build", "--no-cache", "-t", TRIVY_OPERATOR_IMAGE, "-f", "build/trivy-operator/Dockerfile", "bin") }
Target for building Docker image for trivy-operator
Docker
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (b Build) DockerUbi8() error { fmt.Println("Building Docker image for trivy-operator ubi8...") return sh.RunV("docker", "build", "--no-cache", "-f", "build/trivy-operator/Dockerfile.ubi8", "-t", TRIVY_OPERATOR_IMAGE_UBI8, "bin") }
Target for building Docker image for trivy-operator ubi8
DockerUbi8
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (b Build) KindLoadImages() error { fmt.Println("Loading Docker images into the KIND cluster...") mg.Deps(b.Docker, b.DockerUbi8) return sh.RunV(KIND, "load", "docker-image", TRIVY_OPERATOR_IMAGE, TRIVY_OPERATOR_IMAGE_UBI8) }
Target for loading Docker images into the KIND cluster
KindLoadImages
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (d Docs) Serve() error { fmt.Println("Running MkDocs development server...") err := sh.RunV("docker", "build", "-t", MKDOCS_IMAGE, "-f", "build/mkdocs-material/Dockerfile", "build/trivy-operator") if err != nil { return err } return sh.RunV("docker", "run", "--name", "mkdocs-serve", "--rm", "-v", fmt.Sprintf("%s:/docs", PWD), "-p", fmt.Sprintf("%d:8000", MKDOCS_PORT), MKDOCS_IMAGE) }
Target for running MkDocs development server to preview the operator documentation page
Serve
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func installLabeler() error { fmt.Println("Installing the labeler tool...") return sh.RunWithV(GOBINENV, "go", "install", "github.com/knqyf263/labeler@latest") }
Target for installing the labeler tool
installLabeler
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func localBin() error { fmt.Println("Creating LOCALBIN directory...") return os.MkdirAll(LOCALBIN, os.ModePerm) }
Target for creating the LOCALBIN directory
localBin
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func controllerGen() error { mg.Deps(localBin) fmt.Println("Downloading controller-gen...") return sh.RunWithV(GOLOCALBINENV, "go", "install", "sigs.k8s.io/controller-tools/cmd/controller-gen@"+CONTROLLER_TOOLS_VERSION) }
Target for downloading controller-gen locally if necessary
controllerGen
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (t Test) envTestBin() error { mg.Deps(localBin) fmt.Println("Downloading envtest-setup...") return sh.RunWithV(GOLOCALBINENV, "go", "install", "sigs.k8s.io/controller-runtime/tools/setup-envtest@latest") }
Target for downloading envtest-setup locally if necessary
envTestBin
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (g Generate) Verify() { fmt.Println("Verifying generated artifacts...") mg.Deps(g.All, g.verifyFilesDiff) }
Target for verifying generated artifacts
Verify
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (g Generate) Code() error {
	fmt.Println("Generating code and manifests...")
	mg.Deps(controllerGen)
	return sh.RunV(CONTROLLER_GEN, "object:headerFile=hack/boilerplate.go.txt", "paths=./pkg/...", "+rbac:roleName=trivy-operator", "output:rbac:artifacts:config=deploy/helm/generated")
}
Target for generating code and manifests
Code
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (g Generate) Manifests() error {
	fmt.Println("Generating CRDs and updating static YAML...")
	mg.Deps(controllerGen)
	err := sh.RunV(CONTROLLER_GEN, "crd:allowDangerousTypes=true", "paths=./pkg/apis/...", "output:crd:artifacts:config=deploy/helm/crds")
	if err != nil {
		return err
	}
	return sh.RunV("./hack/update-static.yaml.sh")
}
Target for generating CRDs and updating static YAML
Manifests
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (g Generate) All() {
	fmt.Println("Generating all artifacts...")
	mg.Deps(g.Code, g.Manifests)
}
Target for generating all artifacts
All
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (g Generate) Docs() error {
	fmt.Println("Generating Helm documentation...")
	err := sh.RunWithV(GOLOCALBINENV, "go", "install", "github.com/norwoodj/helm-docs/cmd/helm-docs@latest")
	if err != nil {
		return err
	}
	return sh.RunV(HELM_DOCS_GEN, "./deploy")
}
Target for generating Helm documentation
Docs
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (g Generate) VerifyDocs() error {
	fmt.Println("Verifying generated Helm documentation...")
	mg.Deps(g.Docs)
	return g.verifyFilesDiff()
}
Target for verifying generated Helm documentation
VerifyDocs
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func goEnv(envVar string) string {
	cmd := exec.Command("go", "env", envVar)
	output, err := cmd.Output()
	if err != nil {
		fmt.Printf("Error retrieving Go environment variable %s: %v\n", envVar, err)
		os.Exit(1)
	}
	return string(output)
}
goEnv returns the value of a Go environment variable.
goEnv
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
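As a usage note for the pattern above: `go env` prints the value followed by a newline, and goEnv returns the raw output without trimming it, so callers typically need to trim whitespace before using the value in paths. The following standalone sketch is my own illustration of the same lookup, not code from the magefile.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Look up GOPATH the same way goEnv does, via the go tool itself.
	out, err := exec.Command("go", "env", "GOPATH").Output()
	if err != nil {
		fmt.Fprintf(os.Stderr, "error retrieving Go environment variable GOPATH: %v\n", err)
		os.Exit(1)
	}
	// `go env` output ends with a newline, so trim it before joining it into paths.
	gopath := strings.TrimSpace(string(out))
	fmt.Println(gopath)
}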
func (t Test) Envtest() error {
	mg.Deps(t.envTestBin)
	output, err := sh.Output(filepath.Join(PWD, "bin", "setup-envtest"), "use", ENVTEST_K8S_VERSION, "-p", "path")
	if err != nil {
		return err
	}
	return sh.RunWithV(map[string]string{"KUBEBUILDER_ASSETS": output}, "go", "test", "-v", "-timeout", "60s", "-coverprofile=coverage.txt", "./tests/envtest/...")
}
Target for running Kubernetes envtests.
Envtest
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func removeDir(path string) error { return sh.RunV("rm", "-r", path) }
removeDir removes the directory at the given path.
removeDir
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (Tool) Aqua() error {
	if exists(filepath.Join(GOBIN, "aqua")) {
		return nil
	}
	return sh.Run("go", "install", "github.com/aquaproj/aqua/v2/cmd/[email protected]")
}
Target for installing the Aqua tool if it is not already installed
Aqua
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (Lint) Fix() error {
	//mg.Deps(Tool{}.GolangciLint)
	return sh.RunV("golangci-lint", "run", "--fix")
}
Fix automatically fixes linter findings
Fix
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func (t Tool) GolangciLint() error {
	const version = "v1.64.2"
	bin := filepath.Join(GOBIN, "golangci-lint")
	if exists(bin) && t.matchGolangciLintVersion(bin, version) {
		return nil
	}
	command := fmt.Sprintf("curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b %s %s", GOBIN, version)
	return sh.Run("bash", "-c", command)
}
GolangciLint installs golangci-lint
GolangciLint
go
aquasecurity/trivy-operator
magefile.go
https://github.com/aquasecurity/trivy-operator/blob/master/magefile.go
Apache-2.0
func initializeMaps(m dsl.Matcher) { m.Match(`map[$key]$value{}`). Suggest(`make(map[$key]$value)`). Report(`replace '$$' with 'make(map[$key]$value)`) }
cf. https://github.com/uber-go/guide/blob/master/style.md#initializing-maps
initializeMaps
go
aquasecurity/trivy-operator
misc/lint/rules.go
https://github.com/aquasecurity/trivy-operator/blob/master/misc/lint/rules.go
Apache-2.0
func errorsJoin(m dsl.Matcher) {
	m.Match(`errors.Join($x...)`).
		Report("use github.com/hashicorp/go-multierror.Append instead of errors.Join.")

	m.Match(`errors.Join($*args)`).
		Report("use github.com/hashicorp/go-multierror.Append instead of errors.Join.")
}
While errors.Join from the standard library can combine multiple errors, we use hashicorp/go-multierror for more user-friendly error outputs.
errorsJoin
go
aquasecurity/trivy-operator
misc/lint/rules.go
https://github.com/aquasecurity/trivy-operator/blob/master/misc/lint/rules.go
Apache-2.0
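The rule above only reports the preferred alternative; as a rough illustration of what it points to, the sketch below accumulates errors with hashicorp/go-multierror.Append rather than errors.Join. This is my own minimal example of the suggested library, not code from the repository.

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result error
	// Append collects errors into a *multierror.Error whose Error() output
	// lists each failure, which is why the lint rule prefers it over errors.Join.
	result = multierror.Append(result, errors.New("first failure"))
	result = multierror.Append(result, errors.New("second failure"))
	fmt.Println(result)
}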
func NewReadWriter(objectResolver *kube.ObjectResolver) ReadWriter { return &readWriter{ ObjectResolver: objectResolver, } }
NewReadWriter constructs a new ReadWriter that uses the client package provided by the controller-runtime libraries for interacting with the Kubernetes API server.
NewReadWriter
go
aquasecurity/trivy-operator
pkg/sbomreport/io.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/sbomreport/io.go
Apache-2.0
func (w *cm) GenerateComplianceReport(ctx context.Context, spec v1alpha1.ReportSpec) error {
	trivyResults, err := misconfigReportToTrivyResults(w.client, ctx)
	if err != nil {
		return err
	}
	status, err := w.buildComplianceReport(spec, trivyResults)
	if err != nil {
		return err
	}
	// generate cluster compliance report
	updatedReport, err := w.createComplianceReport(ctx, spec, status)
	if err != nil {
		return err
	}
	// update compliance report status
	return w.client.Status().Update(ctx, updatedReport)
}
GenerateComplianceReport generates and publishes a compliance report based on the given report spec
GenerateComplianceReport
go
aquasecurity/trivy-operator
pkg/compliance/io.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/compliance/io.go
Apache-2.0
func (w *cm) createComplianceReport(ctx context.Context, reportSpec v1alpha1.ReportSpec, reportStatus v1alpha1.ReportStatus) (*v1alpha1.ClusterComplianceReport, error) {
	reportStatus.UpdateTimestamp = metav1.NewTime(ext.NewSystemClock().Now())
	r := v1alpha1.ClusterComplianceReport{
		ObjectMeta: metav1.ObjectMeta{
			Name: strings.ToLower(reportSpec.Compliance.ID),
		},
		Status: reportStatus,
	}
	var existing v1alpha1.ClusterComplianceReport
	err := w.client.Get(ctx, types.NamespacedName{
		Name: strings.ToLower(reportSpec.Compliance.ID),
	}, &existing)
	if err != nil {
		return nil, fmt.Errorf("compliance crd with name %s is missing", reportSpec.Compliance.ID)
	}
	copied := existing.DeepCopy()
	copied.Labels = r.Labels
	copied.Status = r.Status
	copied.Spec = reportSpec
	copied.Status.UpdateTimestamp = metav1.NewTime(ext.NewSystemClock().Now())
	return copied, nil
}
createComplianceReport creates the cluster compliance report from the given spec and status
createComplianceReport
go
aquasecurity/trivy-operator
pkg/compliance/io.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/compliance/io.go
Apache-2.0
func (w *cm) buildComplianceReport(spec v1alpha1.ReportSpec, complianceResults []ttypes.Results) (v1alpha1.ReportStatus, error) {
	trivyCompSpec := v1alpha1.ToComplianceSpec(spec.Compliance)
	cr, err := report.BuildComplianceReport(complianceResults, trivyCompSpec)
	if err != nil {
		return v1alpha1.ReportStatus{}, err
	}
	summary := v1alpha1.TotalsCheckCount(cr)
	switch spec.ReportFormat {
	case v1alpha1.ReportSummary:
		rs := report.BuildSummary(cr)
		return v1alpha1.ReportStatus{SummaryReport: v1alpha1.FromSummaryReport(rs), Summary: summary}, nil
	case v1alpha1.ReportDetail:
		return v1alpha1.ReportStatus{DetailReport: v1alpha1.FromDetailReport(cr), Summary: summary}, nil
	default:
		return v1alpha1.ReportStatus{}, errors.New("report type is invalid")
	}
}
buildComplianceReport builds the compliance report status based on the report format {summary | detail}
buildComplianceReport
go
aquasecurity/trivy-operator
pkg/compliance/io.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/compliance/io.go
Apache-2.0
func misconfigReportToTrivyResults(cli client.Client, ctx context.Context) ([]ttypes.Results, error) {
	resultsArray := make([]ttypes.Results, 0)
	// collect configaudit report data
	caObjList := &v1alpha1.ConfigAuditReportList{}
	err := cli.List(ctx, caObjList)
	if err != nil {
		return nil, err
	}
	for _, ca := range caObjList.Items {
		results := reportsToResults(ca.Report.Checks, ca.Name, ca.Namespace)
		resultsArray = append(resultsArray, results)
	}
	// collect rbac assessment report data
	raObjList := &v1alpha1.RbacAssessmentReportList{}
	err = cli.List(ctx, raObjList)
	if err != nil {
		return nil, err
	}
	for _, ra := range raObjList.Items {
		results := reportsToResults(ra.Report.Checks, ra.Name, ra.Namespace)
		resultsArray = append(resultsArray, results)
	}
	// collect cluster rbac assessment report data
	craObjList := &v1alpha1.ClusterRbacAssessmentReportList{}
	err = cli.List(ctx, craObjList)
	if err != nil {
		return nil, err
	}
	for _, cra := range craObjList.Items {
		results := reportsToResults(cra.Report.Checks, cra.Name, cra.Namespace)
		resultsArray = append(resultsArray, results)
	}
	// collect infra assessment report data
	iaObjList := &v1alpha1.InfraAssessmentReportList{}
	err = cli.List(ctx, iaObjList)
	if err != nil {
		return nil, err
	}
	for _, ia := range iaObjList.Items {
		results := reportsToResults(ia.Report.Checks, ia.Name, ia.Namespace)
		resultsArray = append(resultsArray, results)
	}
	// collect cluster infra assessment report data
	ciaObjList := &v1alpha1.ClusterInfraAssessmentReportList{}
	err = cli.List(ctx, ciaObjList)
	if err != nil {
		return nil, err
	}
	for _, cia := range ciaObjList.Items {
		results := reportsToResults(cia.Report.Checks, cia.Name, cia.Namespace)
		resultsArray = append(resultsArray, results)
	}
	return resultsArray, nil
}
misconfigReportToTrivyResults converts misconfiguration and infra assessment report data to Trivy results
misconfigReportToTrivyResults
go
aquasecurity/trivy-operator
pkg/compliance/io.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/compliance/io.go
Apache-2.0
func GetServerFromImageRef(imageRef string) (string, error) {
	ref, err := containerimage.ParseReference(imageRef)
	if err != nil {
		return "", err
	}
	return ref.Context().RegistryStr(), nil
}
GetServerFromImageRef returns the registry server from the specified imageRef.
GetServerFromImageRef
go
aquasecurity/trivy-operator
pkg/docker/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/docker/config.go
Apache-2.0
func GetServerFromDockerAuthKey(key string) (string, error) {
	if !(strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://")) {
		key = fmt.Sprintf("https://%s", key)
	}
	parsed, err := url.Parse(key)
	if err != nil {
		return "", err
	}
	return parsed.Host, nil
}
GetServerFromDockerAuthKey returns the registry server for the specified Docker auth key. In ~/.docker/config.json auth keys can be specified as URLs or host names. For the sake of comparison we need to normalize the registry identifier.
GetServerFromDockerAuthKey
go
aquasecurity/trivy-operator
pkg/docker/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/docker/config.go
Apache-2.0
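A short, hypothetical usage sketch for the two pkg/docker helpers above, assuming the package is imported from a module that depends on trivy-operator; the image reference "quay.io/example/app:1.0" is an invented example. Both a bare host key from ~/.docker/config.json and a full image reference should resolve to a registry server string that can be compared directly.

package main

import (
	"fmt"

	"github.com/aquasecurity/trivy-operator/pkg/docker"
)

func main() {
	// Normalize an auth key as it may appear in ~/.docker/config.json.
	server, err := docker.GetServerFromDockerAuthKey("index.docker.io/v1/")
	if err != nil {
		panic(err)
	}
	fmt.Println(server) // index.docker.io

	// Extract the registry server from a full image reference.
	server, err = docker.GetServerFromImageRef("quay.io/example/app:1.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(server) // quay.io
}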
func NewGoogleUUIDGenerator() IDGenerator { return &googleUUIDGenerator{} }
NewGoogleUUIDGenerator constructs a new IDGenerator implemented with Google's UUID module.
NewGoogleUUIDGenerator
go
aquasecurity/trivy-operator
pkg/ext/id_generator.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/ext/id_generator.go
Apache-2.0
func NewSimpleIDGenerator() IDGenerator { return &simpleIDGenerator{} }
NewSimpleIDGenerator constructs a simple IDGenerator that starts at 1, increments up to 999999999999, and then rolls over.
NewSimpleIDGenerator
go
aquasecurity/trivy-operator
pkg/ext/id_generator.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/ext/id_generator.go
Apache-2.0
func SliceContainsString(slice []string, value string) bool {
	exists := false
	for _, targetNamespace := range slice {
		if targetNamespace == value {
			exists = true
		}
	}
	return exists
}
SliceContainsString returns true if the specified slice of strings contains the given value, false otherwise.
SliceContainsString
go
aquasecurity/trivy-operator
pkg/ext/lang.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/ext/lang.go
Apache-2.0
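A trivial usage sketch of the helper above, written for illustration (the namespace values are invented):

package main

import (
	"fmt"

	"github.com/aquasecurity/trivy-operator/pkg/ext"
)

func main() {
	targetNamespaces := []string{"default", "kube-system"}
	fmt.Println(ext.SliceContainsString(targetNamespaces, "default"))    // true
	fmt.Println(ext.SliceContainsString(targetNamespaces, "monitoring")) // false
}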
func GetDefaultConfig() ConfigData { return map[string]string{ keyVulnerabilityReportsScanner: "Trivy", keyConfigAuditReportsScanner: "Trivy", KeyScanJobcompressLogs: "true", keyComplianceFailEntriesLimit: "10", KeyReportRecordFailedChecksOnly: "true", KeyNodeCollectorImageRef: "gcr.io/aquasecurity/node-collector:0.3.1", KeyPoliciesBundleOciRef: "mirror.gcr.io/aquasec/trivy-checks:1", } }
GetDefaultConfig returns the default configuration settings.
GetDefaultConfig
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0
func (c ConfigData) Set(key, value string) { c[key] = value }
Set sets a key on config data
Set
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0
func (c ConfigData) CompressLogs() bool { return c.getBoolKey(KeyScanJobcompressLogs) }
CompressLogs returns whether scan job output should be compressed
CompressLogs
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0
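To illustrate how the boolean accessors relate to GetDefaultConfig and Set shown above, here is a small hypothetical sketch, again assuming the pkg/trivyoperator package is importable from a dependent module:

package main

import (
	"fmt"

	"github.com/aquasecurity/trivy-operator/pkg/trivyoperator"
)

func main() {
	cfg := trivyoperator.GetDefaultConfig()

	// KeyScanJobcompressLogs defaults to "true" in GetDefaultConfig.
	fmt.Println(cfg.CompressLogs()) // true

	// ConfigData is a map type, so Set mutates the underlying map in place.
	cfg.Set(trivyoperator.KeyScanJobcompressLogs, "false")
	fmt.Println(cfg.CompressLogs()) // false
}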
func (c ConfigData) VulnerabilityScannerEnabled() bool { return c.getBoolKey(KeyVulnerabilityScannerEnabled) }
VulnerabilityScannerEnabled returns whether the vulnerability scanner is enabled or disabled
VulnerabilityScannerEnabled
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0
func (c ConfigData) ExposedSecretsScannerEnabled() bool { return c.getBoolKey(KeyExposedSecretsScannerEnabled) }
ExposedSecretsScannerEnabled returns whether the exposed secrets scanner is enabled or disabled
ExposedSecretsScannerEnabled
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0
func (c ConfigData) GenerateSbomEnabled() bool { return c.getBoolKey(KeyGenerateSbom) }
GenerateSbomEnabled returns whether SBOM generation is enabled
GenerateSbomEnabled
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0
func GetVersionFromImageRef(imageRef string) (string, error) {
	ref, err := containerimage.ParseReference(imageRef)
	if err != nil {
		return "", fmt.Errorf("parsing reference: %w", err)
	}
	var version string
	switch t := ref.(type) {
	case containerimage.Tag:
		version = t.TagStr()
	case containerimage.Digest:
		version = t.DigestStr()
	}
	return version, nil
}
GetVersionFromImageRef returns the image identifier for the specified image reference.
GetVersionFromImageRef
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0
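A hypothetical sketch of how the version extraction above behaves for a tag versus a digest reference; the digest below is a dummy value used only to satisfy the reference parser, and the image names are illustrative.

package main

import (
	"fmt"

	"github.com/aquasecurity/trivy-operator/pkg/trivyoperator"
)

func main() {
	// Tagged reference: the tag string is returned.
	v, err := trivyoperator.GetVersionFromImageRef("mirror.gcr.io/aquasec/trivy-checks:1")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 1

	// Digest reference: the digest string is returned (dummy digest for illustration).
	v, err = trivyoperator.GetVersionFromImageRef("ghcr.io/aquasecurity/trivy@sha256:" +
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		panic(err)
	}
	fmt.Println(v)
}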
func NewConfigManager(client kubernetes.Interface, namespace string) ConfigManager { return &configManager{ client: client, namespace: namespace, } }
NewConfigManager constructs a new ConfigManager that uses kubernetes.Interface to manage ConfigData backed by the ConfigMap stored in the specified namespace.
NewConfigManager
go
aquasecurity/trivy-operator
pkg/trivyoperator/config.go
https://github.com/aquasecurity/trivy-operator/blob/master/pkg/trivyoperator/config.go
Apache-2.0