Dataset schema: code (string, 11–335k chars), docstring (string, 20–11.8k chars), func_name (string, 1–100 chars), language (string, 1 class), repo (string, 245 classes), path (string, 4–144 chars), url (string, 43–214 chars), license (string, 4 classes).
func (d *Client) Ping(ctx context.Context) error { // Define the test query q := query.Query{ Query: "SELECT 1", } // Use the existing RunQueryWithoutResult method err := d.RunQueryWithoutResult(ctx, &q) if err != nil { return errors.Wrap(err, "failed to run test query on Bigquery connection") } return nil // Return nil if the query runs successfully }
Ping runs a simple query (SELECT 1) to validate the connection.
Ping
go
bruin-data/bruin
pkg/bigquery/db.go
https://github.com/bruin-data/bruin/blob/master/pkg/bigquery/db.go
Apache-2.0
func NewLineageExtractor(parser sqlParser) *LineageExtractor { return &LineageExtractor{ sqlParser: parser, renderer: jinja.NewRendererWithYesterday("lineage-parser", "lineage-parser"), } }
NewLineageExtractor creates a new LineageExtractor instance.
NewLineageExtractor
go
bruin-data/bruin
pkg/lineage/lineage.go
https://github.com/bruin-data/bruin/blob/master/pkg/lineage/lineage.go
Apache-2.0
func (p *LineageExtractor) TableSchema(foundPipeline *pipeline.Pipeline) sqlparser.Schema { columnMetadata := make(sqlparser.Schema) for _, foundAsset := range foundPipeline.Assets { if len(foundAsset.Columns) > 0 { columnMetadata[foundAsset.Name] = makeColumnMap(foundAsset.Columns) } } return columnMetadata }
TableSchema extracts the table schema from the assets and stores it in the columnMetadata map.
TableSchema
go
bruin-data/bruin
pkg/lineage/lineage.go
https://github.com/bruin-data/bruin/blob/master/pkg/lineage/lineage.go
Apache-2.0
func (p *LineageExtractor) TableSchemaForUpstreams(foundPipeline *pipeline.Pipeline, asset *pipeline.Asset) sqlparser.Schema { columnMetadata := make(sqlparser.Schema) for _, upstream := range asset.Upstreams { if upstream.Type != "asset" { continue } upstreamAsset := foundPipeline.GetAssetByName(upstream.Value) if upstreamAsset == nil { continue } if len(upstreamAsset.Columns) > 0 { columnMetadata[upstreamAsset.Name] = makeColumnMap(upstreamAsset.Columns) } } return columnMetadata }
TableSchemaForUpstreams extracts the table schema for a single asset and returns a sqlparser schema only for its upstreams.
TableSchemaForUpstreams
go
bruin-data/bruin
pkg/lineage/lineage.go
https://github.com/bruin-data/bruin/blob/master/pkg/lineage/lineage.go
Apache-2.0
func (p *LineageExtractor) ColumnLineage(foundPipeline *pipeline.Pipeline, asset *pipeline.Asset, processedAssets map[string]bool) *LineageError { issues := LineageError{ Issues: []*LineageIssue{}, } if asset == nil { return nil } if processedAssets[asset.Name] { return nil } processedAssets[asset.Name] = true for _, upstream := range asset.Upstreams { upstreamAsset := foundPipeline.GetAssetByName(upstream.Value) if upstreamAsset == nil { continue } if errIssues := p.ColumnLineage(foundPipeline, upstreamAsset, processedAssets); errIssues != nil { issues.Issues = append(issues.Issues, errIssues.Issues...) } } // TODO: Currently we are ignoring non user errors, we should handle them lineageError, _ := p.parseLineage(foundPipeline, asset, p.TableSchemaForUpstreams(foundPipeline, asset)) if lineageError != nil { issues.Issues = append(issues.Issues, lineageError) } return &issues }
ColumnLineage processes the lineage of an asset and its upstream dependencies recursively.
ColumnLineage
go
bruin-data/bruin
pkg/lineage/lineage.go
https://github.com/bruin-data/bruin/blob/master/pkg/lineage/lineage.go
Apache-2.0
func (p *LineageExtractor) parseLineage(foundPipeline *pipeline.Pipeline, asset *pipeline.Asset, metadata sqlparser.Schema) (*LineageIssue, error) { if asset == nil { return nil, errors.New("invalid arguments: asset and pipeline cannot be nil") } dialect, err := sqlparser.AssetTypeToDialect(asset.Type) if err != nil { return nil, nil //nolint:nilerr } for _, upstream := range asset.Upstreams { upstreamAsset := foundPipeline.GetAssetByName(upstream.Value) if upstreamAsset == nil { return &LineageIssue{ Task: asset, Description: "upstream asset not found: " + upstream.Value, Context: []string{ asset.ExecutableFile.Content, }, }, errors.New("upstream asset not found: " + upstream.Value) } } query, err := p.renderer.Render(asset.ExecutableFile.Content) if err != nil { return &LineageIssue{ Task: asset, Description: err.Error(), Context: []string{ asset.ExecutableFile.Content, }, }, fmt.Errorf("failed to render the query: %w", err) } lineage, err := p.sqlParser.ColumnLineage(query, dialect, metadata) if err != nil { return nil, fmt.Errorf("failed to parse lineage: %w", err) } if len(lineage.Errors) > 0 { return &LineageIssue{ Task: asset, Description: strings.Join(lineage.Errors, ", "), Context: []string{ asset.ExecutableFile.Content, }, }, fmt.Errorf("failed to parse lineage: %s", strings.Join(lineage.Errors, ", ")) } if err := p.processLineageColumns(foundPipeline, asset, lineage); err != nil { return nil, fmt.Errorf("failed to process lineage: %w", err) } return nil, nil }
parseLineage analyzes the column lineage for a given asset within a pipeline. It traces column relationships between the asset and its upstream dependencies.
parseLineage
go
bruin-data/bruin
pkg/lineage/lineage.go
https://github.com/bruin-data/bruin/blob/master/pkg/lineage/lineage.go
Apache-2.0
func makeColumnMap(columns []pipeline.Column) map[string]string { if len(columns) == 0 { return make(map[string]string) } columnMap := make(map[string]string, len(columns)) for _, col := range columns { if col.Name != "" { columnMap[col.Name] = col.Type } } return columnMap }
makeColumnMap creates a map of column names to their types from a slice of columns.
makeColumnMap
go
bruin-data/bruin
pkg/lineage/lineage.go
https://github.com/bruin-data/bruin/blob/master/pkg/lineage/lineage.go
Apache-2.0
func columnHints(cols []pipeline.Column) string { hints := make([]string, 0) for _, col := range cols { typ := normaliseColumnType(col.Type) hint, exists := typeHintMapping[typ] if !exists { continue } name := normalizeColumnName(col.Name) hints = append(hints, fmt.Sprintf("%s:%s", name, hint)) } return strings.Join(hints, ",") }
columnHints returns an ingestr compatible type hint string that can be passed via the --column flag to the CLI.
columnHints
go
bruin-data/bruin
pkg/ingestr/types.go
https://github.com/bruin-data/bruin/blob/master/pkg/ingestr/types.go
Apache-2.0
func validateServiceAccountFile(filePath string) error { var jsonStr json.RawMessage if err := json.Unmarshal([]byte(filePath), &jsonStr); err == nil { return errors.New("please use service_account_json instead of service_account_file to define json") } file, err := os.ReadFile(filePath) if err != nil { return errors.Errorf("failed to read service account file at '%s': %v", filePath, err) } var js json.RawMessage if err := json.Unmarshal(file, &js); err != nil { return errors.Errorf("invalid JSON format in service account file at '%s'", filePath) } return nil }
validateServiceAccountFile is a helper function to validate the service account file at the given path.
validateServiceAccountFile
go
bruin-data/bruin
pkg/connection/helper.go
https://github.com/bruin-data/bruin/blob/master/pkg/connection/helper.go
Apache-2.0
func validateServiceAccountJSON(jsonStr string) error { // Check if the path exists and is a file if _, err := os.Stat(jsonStr); err == nil { return errors.New("please use service_account_file instead of service_account_json to define path") } var js json.RawMessage if err := json.Unmarshal([]byte(jsonStr), &js); err != nil { return errors.Errorf("invalid JSON format in service account JSON") } return nil }
validateServiceAccountJSON is a helper function to validate the given service account JSON string.
validateServiceAccountJSON
go
bruin-data/bruin
pkg/connection/helper.go
https://github.com/bruin-data/bruin/blob/master/pkg/connection/helper.go
Apache-2.0
func (s *Scheduler) Tick(result *TaskExecutionResult) bool { s.taskScheduleLock.Lock() defer s.taskScheduleLock.Unlock() if result.Instance.GetStatus() != Skipped { s.MarkTaskInstance(result.Instance, Succeeded, false) } if result.Error != nil { s.markTaskInstanceFailedWithDownstream(result.Instance) } if s.hasPipelineFinished() { close(s.WorkQueue) return true } tasks := s.getScheduleableTasks() if len(tasks) == 0 { return false } for _, task := range tasks { task.MarkAs(Queued) s.WorkQueue <- task } return false }
Tick marks an iteration of the scheduler loop. It is called when a result is received. The results are mainly fed from a channel, but Tick allows implementing additional methods of passing Asset results and simulating scheduler loops, e.g. time travel. It is also useful for testing purposes.
Tick
go
bruin-data/bruin
pkg/scheduler/scheduler.go
https://github.com/bruin-data/bruin/blob/master/pkg/scheduler/scheduler.go
Apache-2.0
func (s *Scheduler) Kickstart() { s.Tick(&TaskExecutionResult{ Instance: &AssetInstance{ Asset: &pipeline.Asset{ Name: "start", }, status: Succeeded, }, }) }
Kickstart initiates the scheduler process by sending a "start" task for processing.
Kickstart
go
bruin-data/bruin
pkg/scheduler/scheduler.go
https://github.com/bruin-data/bruin/blob/master/pkg/scheduler/scheduler.go
Apache-2.0
func (c *Config) GetIngestrURI() string { return "applovinmax://?api_key=" + c.APIKey }
GetIngestrURI returns the ingestr URI in the form applovinmax://?api_key=<your_api_key>.
GetIngestrURI
go
bruin-data/bruin
pkg/applovinmax/config.go
https://github.com/bruin-data/bruin/blob/master/pkg/applovinmax/config.go
Apache-2.0
func (db *DB) Ping(ctx context.Context) error { // Define the test query q := query.Query{ Query: "SELECT 1", } // Use the existing RunQueryWithoutResult method err := db.RunQueryWithoutResult(ctx, &q) if err != nil { return errors.Wrap(err, "failed to run test query on Snowflake connection") } return nil // Return nil if the query runs successfully }
Ping runs a simple query (SELECT 1) to validate the connection.
Ping
go
bruin-data/bruin
pkg/snowflake/db.go
https://github.com/bruin-data/bruin/blob/master/pkg/snowflake/db.go
Apache-2.0
func ExcludeSubItemsInDirectoryContainingFile(filePaths []string, file string) []string { result := make([]string, 0, len(filePaths)) var targetsToRemove []string for _, path := range filePaths { if strings.HasSuffix(path, file) { targetsToRemove = append(targetsToRemove, filepath.Dir(path)) } } for _, path := range filePaths { shouldBeIncluded := true for _, target := range targetsToRemove { if strings.HasPrefix(path, target) && path != filepath.Join(target, file) { shouldBeIncluded = false break } } if shouldBeIncluded { result = append(result, path) } } return result }
ExcludeSubItemsInDirectoryContainingFile cleans up the list to remove sub-paths that are in the same directory as the file. The primary usage of this is to remove the sub-paths for the directory that contains `task.yml`.
ExcludeSubItemsInDirectoryContainingFile
go
bruin-data/bruin
pkg/path/file.go
https://github.com/bruin-data/bruin/blob/master/pkg/path/file.go
Apache-2.0
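For illustration, a minimal usage sketch of ExcludeSubItemsInDirectoryContainingFile (the import path and package alias are assumptions based on the repo layout; Unix path separators assumed):

package main

import (
	"fmt"

	bruinpath "github.com/bruin-data/bruin/pkg/path" // assumed import path
)

func main() {
	files := []string{
		"pipeline/task1/task.yml",
		"pipeline/task1/helper.sql", // sibling of task.yml: excluded
		"pipeline/task2/asset.py",   // different directory: kept
	}
	// Only task.yml itself survives from the directory that contains it.
	fmt.Println(bruinpath.ExcludeSubItemsInDirectoryContainingFile(files, "task.yml"))
	// [pipeline/task1/task.yml pipeline/task2/asset.py]
}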
func (c *Client) Select(ctx context.Context, query *query.Query) ([][]interface{}, error) { rows, err := c.connection.Query(ctx, query.String()) if err != nil { return nil, err } defer rows.Close() collectedRows := make([][]interface{}, 0) for rows.Next() { result := RowScanner{} if err := rows.Scan(&result); err != nil { return nil, errors.Wrap(err, "failed to scan row") } collectedRows = append(collectedRows, result.values) } return collectedRows, nil }
Select runs a query and returns the results.
Select
go
bruin-data/bruin
pkg/clickhouse/db.go
https://github.com/bruin-data/bruin/blob/master/pkg/clickhouse/db.go
Apache-2.0
func (c *Client) Ping(ctx context.Context) error { q := query.Query{ Query: "SELECT 1", } err := c.RunQueryWithoutResult(ctx, &q) if err != nil { return errors.Wrap(err, "failed to run test query on ClickHouse connection") } return nil }
Ping runs a simple query (SELECT 1) to validate the connection.
Ping
go
bruin-data/bruin
pkg/clickhouse/db.go
https://github.com/bruin-data/bruin/blob/master/pkg/clickhouse/db.go
Apache-2.0
func ValidateCustomCheckQueryExists(ctx context.Context, p *pipeline.Pipeline, asset *pipeline.Asset) ([]*Issue, error) { var issues []*Issue for _, check := range asset.CustomChecks { if check.Query == "" { issues = append(issues, &Issue{ Task: asset, Description: fmt.Sprintf("Custom check '%s' query cannot be empty", check.Name), }) } } return issues, nil }
ValidateCustomCheckQueryExists checks that every custom check defined on an asset has a non-empty query. It returns a slice of Issues, one for each custom check whose query is empty.
ValidateCustomCheckQueryExists
go
bruin-data/bruin
pkg/lint/rules.go
https://github.com/bruin-data/bruin/blob/master/pkg/lint/rules.go
Apache-2.0
func ValidateDuplicateColumnNames(ctx context.Context, p *pipeline.Pipeline, asset *pipeline.Asset) ([]*Issue, error) { var issues []*Issue columnNames := make(map[string]bool) for _, column := range asset.Columns { lowercaseName := strings.ToLower(column.Name) if columnNames[lowercaseName] { issues = append(issues, &Issue{ Task: asset, Description: fmt.Sprintf("Duplicate column name '%s' found ", column.Name), }) } else { columnNames[lowercaseName] = true } } return issues, nil }
ValidateDuplicateColumnNames checks for duplicate column names within a single asset. It returns a slice of Issues, each representing a duplicate column name found. The function performs a case-insensitive comparison of column names. Parameters: - ctx: The context for the validation operation - p: A pointer to the pipeline.Pipeline struct - asset: The pipeline.Asset to be validated for duplicate column names. Returns: - A slice of *Issue, each describing a duplicate column name found. - An error, which is always nil in this implementation.
ValidateDuplicateColumnNames
go
bruin-data/bruin
pkg/lint/rules.go
https://github.com/bruin-data/bruin/blob/master/pkg/lint/rules.go
Apache-2.0
func EnsurePipelineHasNoCycles(p *pipeline.Pipeline) ([]*Issue, error) { issues := make([]*Issue, 0) for _, task := range p.Assets { for _, dep := range task.Upstreams { if dep.Type == "uri" { continue } if task.Name == dep.Value { issues = append(issues, &Issue{ Description: pipelineContainsCycle, Context: []string{fmt.Sprintf("Asset `%s` depends on itself", task.Name)}, }) } } } taskNameToIndex := make(map[string]int, len(p.Assets)) for i, task := range p.Assets { taskNameToIndex[task.Name] = i } g := graph.New(len(p.Assets)) for _, task := range p.Assets { for _, dep := range task.Upstreams { if dep.Type == "uri" { continue } g.Add(taskNameToIndex[task.Name], taskNameToIndex[dep.Value]) } } cycles := graph.StrongComponents(g) for _, cycle := range cycles { cycleLength := len(cycle) if cycleLength == 1 { continue } tasksInCycle := make(map[string]bool, cycleLength) for _, taskIndex := range cycle { tasksInCycle[p.Assets[taskIndex].Name] = true } context := make([]string, 0, cycleLength) for _, taskIndex := range cycle { task := p.Assets[taskIndex] for _, dep := range task.Upstreams { if dep.Type == "uri" { continue } if _, ok := tasksInCycle[dep.Value]; !ok { continue } context = append(context, fmt.Sprintf("%s ➜ %s", task.Name, dep.Value)) } } issues = append(issues, &Issue{ Description: pipelineContainsCycle, Context: context, }) } return issues, nil }
EnsurePipelineHasNoCycles ensures that the pipeline is a DAG, and contains no cycles. Since the pipelines are directed graphs, strongly connected components mean cycles, therefore they would be considered invalid for our pipelines. Strong connectivity wouldn't work for tasks that depend on themselves, therefore there's a specific check for that.
EnsurePipelineHasNoCycles
go
bruin-data/bruin
pkg/lint/rules.go
https://github.com/bruin-data/bruin/blob/master/pkg/lint/rules.go
Apache-2.0
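The strong-components approach above can be seen in isolation with the graph library the code appears to use (github.com/yourbasic/graph is an assumption based on the graph.New and graph.StrongComponents calls): any strongly connected component with more than one vertex is a cycle.

package main

import (
	"fmt"

	"github.com/yourbasic/graph" // assumed: matches the graph.New / graph.StrongComponents calls above
)

func main() {
	// Three assets: 0 -> 1 -> 2 -> 0 forms a cycle.
	g := graph.New(3)
	g.Add(0, 1)
	g.Add(1, 2)
	g.Add(2, 0)

	for _, comp := range graph.StrongComponents(g) {
		if len(comp) > 1 {
			fmt.Println("cycle between vertices:", comp) // e.g. [2 1 0]; order may vary
		}
	}
}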
func packageContextWithPrefix(zw *zip.Writer, context fs.FS, prefix string) error { // todo(turtledev): exclude assets from the packaged zip. // this shouldn't create any issues in most cases, but // adding this will fool-proof our implementation return fs.WalkDir(context, ".", func(name string, d fs.DirEntry, err error) error { if err != nil { return err } if d.IsDir() { // spark will refuse to treat a directory as a package // if it doesn't contain __init__.py zw.CreateHeader(&zip.FileHeader{ //nolint Name: path.Join(prefix, name, "__init__.py"), }) return nil } info, err := d.Info() if err != nil { return err } if !info.Mode().IsRegular() { return errors.New("package: cannot add non-regular file") } h, err := zip.FileInfoHeader(info) if err != nil { return err } h.Name = path.Join(prefix, name) h.Method = zip.Deflate fw, err := zw.CreateHeader(h) if err != nil { return err } f, err := context.Open(name) if err != nil { return err } defer f.Close() _, err = io.Copy(fw, f) return err }) }
packageContextWithPrefix is zip.AddFS() modified with support for a filesystem prefix and some Spark-specific adjustments.
packageContextWithPrefix
go
bruin-data/bruin
pkg/emr_serverless/package.go
https://github.com/bruin-data/bruin/blob/master/pkg/emr_serverless/package.go
Apache-2.0
func (job Job) prepareWorkspace(ctx context.Context) (*workspace, error) { workspaceURI, err := url.Parse(job.params.Workspace) if err != nil { return nil, fmt.Errorf("error parsing workspace URL: %w", err) } jobID, err := uuid.NewV7() if err != nil { return nil, fmt.Errorf("error generating job ID: %w", err) } jobURI := workspaceURI.JoinPath(job.pipeline.Name, jobID.String()) scriptPath := job.asset.ExecutableFile.Path fd, err := os.Open(scriptPath) if err != nil { return nil, fmt.Errorf("error opening file %q: %w", scriptPath, err) } defer fd.Close() scriptURI := jobURI.JoinPath(job.asset.ExecutableFile.Name) _, err = job.s3Client.PutObject(ctx, &s3.PutObjectInput{ Bucket: &scriptURI.Host, Key: aws.String(strings.TrimPrefix(scriptURI.Path, "/")), Body: fd, }) if err != nil { return nil, fmt.Errorf("error uploading entrypoint %q: %w", scriptURI, err) } fd, err = os.CreateTemp("", "bruin-spark-context-*.zip") if err != nil { return nil, fmt.Errorf("error creating temporary file %w", err) } defer os.Remove(fd.Name()) defer fd.Close() zipper := zip.NewWriter(fd) defer zipper.Close() pipelineRoot, err := path.GetPipelineRootFromTask(scriptPath, []string{"pipeline.yaml", "pipeline.yml"}) if err != nil { return nil, fmt.Errorf("error finding pipeline root: %w", err) } err = packageContextWithPrefix( zipper, os.DirFS(pipelineRoot), filepath.Base(pipelineRoot), ) if err != nil { return nil, fmt.Errorf("error packaging files: %w", err) } err = zipper.Close() if err != nil { return nil, fmt.Errorf("error closing zip writer: %w", err) } _, err = fd.Seek(0, 0) if err != nil { return nil, fmt.Errorf("error rewinding file %q: %w", fd.Name(), err) } contextURI := jobURI.JoinPath("context.zip") _, err = job.s3Client.PutObject(ctx, &s3.PutObjectInput{ Bucket: &contextURI.Host, Key: aws.String(strings.TrimPrefix(contextURI.Path, "/")), Body: fd, }) if err != nil { return nil, fmt.Errorf("error uploading context %q: %w", contextURI, err) } return &workspace{ Root: jobURI, Entrypoint: scriptURI.String(), Files: contextURI.String(), Logs: workspaceURI.JoinPath("logs").String(), }, nil }
prepareWorkspace uploads the entrypoint script and the packaged pipeline context to an S3 workspace for a PySpark job run.
prepareWorkspace
go
bruin-data/bruin
pkg/emr_serverless/operator.go
https://github.com/bruin-data/bruin/blob/master/pkg/emr_serverless/operator.go
Apache-2.0
func (c *Config) GetIngestrURI() string { return "s3://" + c.BucketName + "/" + c.PathToFile + "?access_key_id=" + c.AccessKeyID + "&secret_access_key=" + c.SecretAccessKey }
GetIngestrURI returns the ingestr URI in the form s3://<bucket_name>/<path_to_file>?access_key_id=<access_key_id>&secret_access_key=<secret_access_key>.
GetIngestrURI
go
bruin-data/bruin
pkg/s3/config.go
https://github.com/bruin-data/bruin/blob/master/pkg/s3/config.go
Apache-2.0
func (u *UvChecker) EnsureUvInstalled(ctx context.Context) (string, error) { u.mut.Lock() defer u.mut.Unlock() // Check if uv is already installed m := user.NewConfigManager(afero.NewOsFs()) bruinHomeDirAbsPath, err := m.EnsureAndGetBruinHomeDir() if err != nil { return "", errors.Wrap(err, "failed to get bruin home directory") } var binaryName string if runtime.GOOS == "windows" { binaryName = "uv.exe" } else { binaryName = "uv" } uvBinaryPath := filepath.Join(bruinHomeDirAbsPath, binaryName) if _, err := os.Stat(uvBinaryPath); errors.Is(err, os.ErrNotExist) { err = u.installUvCommand(ctx, bruinHomeDirAbsPath) if err != nil { return "", err } return uvBinaryPath, nil } cmd := exec.Command(uvBinaryPath, "version", "--output-format", "json") output, err := cmd.CombinedOutput() if err != nil { return "", fmt.Errorf("failed to check uv version: %w -- Output: %s", err, output) } var uvVersion struct { Version string `json:"version"` } if err := json.Unmarshal(output, &uvVersion); err != nil { return "", fmt.Errorf("failed to parse uv version: %w", err) } if uvVersion.Version != UvVersion { err = u.installUvCommand(ctx, bruinHomeDirAbsPath) if err != nil { return "", err } return uvBinaryPath, nil } return uvBinaryPath, nil }
EnsureUvInstalled checks if uv is installed and installs it if not present, then returns the full path of the binary.
EnsureUvInstalled
go
bruin-data/bruin
pkg/python/uv.go
https://github.com/bruin-data/bruin/blob/master/pkg/python/uv.go
Apache-2.0
func (u *UvPythonRunner) ingestrInstallCmd(ctx context.Context, pkgs []string) []string { ingestrPackageName, isLocal := u.ingestrPackage(ctx) cmdline := []string{ "tool", "install", "--force", "--quiet", "--python", pythonVersionForIngestr, } for _, pkg := range pkgs { cmdline = append(cmdline, "--with", pkg) } if isLocal { cmdline = append(cmdline, "--reinstall") } cmdline = append(cmdline, ingestrPackageName) return cmdline }
ingestrInstallCmd returns the uv tool commandline args necessary for installing ingestr.
ingestrInstallCmd
go
bruin-data/bruin
pkg/python/uv.go
https://github.com/bruin-data/bruin/blob/master/pkg/python/uv.go
Apache-2.0
func (*RepoFinder) Repo(path string) (*Repo, error) { res, err := FindRepoFromPath(path) if err != nil { return nil, err } return res, nil }
Repo uses git by spawning a process to locate the top level directory.
Repo
go
bruin-data/bruin
pkg/git/root.go
https://github.com/bruin-data/bruin/blob/master/pkg/git/root.go
Apache-2.0
func isDirectory(path string) bool { fileInfo, err := os.Stat(path) if err != nil { return false } return fileInfo.IsDir() }
isDirectory determines if a file represented by `path` is a directory or not.
isDirectory
go
bruin-data/bruin
pkg/git/root.go
https://github.com/bruin-data/bruin/blob/master/pkg/git/root.go
Apache-2.0
func (c Config) ToDBConnectionURI() string { return c.Path }
ToDBConnectionURI returns the database path to be used as the DuckDB connection string.
ToDBConnectionURI
go
bruin-data/bruin
pkg/duckdb/config.go
https://github.com/bruin-data/bruin/blob/master/pkg/duckdb/config.go
Apache-2.0
func (c *Client) Select(ctx context.Context, query *query.Query) ([][]interface{}, error) { LockDatabase(c.config.ToDBConnectionURI()) defer UnlockDatabase(c.config.ToDBConnectionURI()) rows, err := c.connection.QueryContext(ctx, query.String()) if err != nil { return nil, err } if rows.Err() != nil { return nil, rows.Err() } defer rows.Close() result := make([][]interface{}, 0) cols, err := rows.Columns() if err != nil { return nil, err } for rows.Next() { columns := make([]interface{}, len(cols)) columnPointers := make([]interface{}, len(cols)) for i := range columns { columnPointers[i] = &columns[i] } // Scan the result into the column pointers... if err := rows.Scan(columnPointers...); err != nil { return nil, err } result = append(result, columns) } return result, nil }
Select runs a query and returns the results.
Select
go
bruin-data/bruin
pkg/duckdb/db.go
https://github.com/bruin-data/bruin/blob/master/pkg/duckdb/db.go
Apache-2.0
func (m *Mutex) TryLock(key interface{}) bool { for i := range m.maxRetry { m.m.Lock() if _, ok := m.locks[key]; ok { // if locked m.m.Unlock() time.Sleep(m.backoff(i)) } else { // if unlock, lockit m.locks[key] = struct{}{} m.m.Unlock() return true } } return false }
TryLock tries to acquire the lock.
TryLock
go
bruin-data/bruin
pkg/duckdb/lock.go
https://github.com/bruin-data/bruin/blob/master/pkg/duckdb/lock.go
Apache-2.0
func (m *Mutex) Unlock(key interface{}) { m.m.Lock() delete(m.locks, key) m.m.Unlock() }
Unlock releases the lock for the key. Call Unlock only after having acquired the lock.
Unlock
go
bruin-data/bruin
pkg/duckdb/lock.go
https://github.com/bruin-data/bruin/blob/master/pkg/duckdb/lock.go
Apache-2.0
func NewMapMutex() *Mutex { return &Mutex{ locks: make(map[interface{}]interface{}), m: &sync.Mutex{}, maxRetry: 200, maxDelay: 100000000, // 0.1 second baseDelay: 10, // 10 nanosecond factor: 1.1, jitter: 0.2, } }
NewMapMutex returns a mapmutex with default configs.
NewMapMutex
go
bruin-data/bruin
pkg/duckdb/lock.go
https://github.com/bruin-data/bruin/blob/master/pkg/duckdb/lock.go
Apache-2.0
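A minimal usage sketch for the map mutex above (the import path and package alias are assumptions based on the repo layout): TryLock either acquires the per-key lock or gives up after the configured retries.

package main

import (
	"fmt"

	duckdb "github.com/bruin-data/bruin/pkg/duckdb" // assumed import path and alias
)

func main() {
	m := duckdb.NewMapMutex()
	if m.TryLock("my.db") { // acquire the lock for this database path
		defer m.Unlock("my.db") // release only after a successful TryLock
		fmt.Println("lock acquired")
	} else {
		fmt.Println("could not acquire lock after retries")
	}
}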
func NewCustomizedMapMutex(mRetry int, mDelay, bDelay, factor, jitter float64) *Mutex { return &Mutex{ locks: make(map[interface{}]interface{}), m: &sync.Mutex{}, maxRetry: mRetry, maxDelay: mDelay, baseDelay: bDelay, factor: factor, jitter: jitter, } }
NewCustomizedMapMutex returns a customized mapmutex.
NewCustomizedMapMutex
go
bruin-data/bruin
pkg/duckdb/lock.go
https://github.com/bruin-data/bruin/blob/master/pkg/duckdb/lock.go
Apache-2.0
func (c Config) ToDBConnectionURI() string { connectionURI := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s&pool_max_conns=%d", url.PathEscape(c.Username), url.PathEscape(c.Password), net.JoinHostPort(c.Host, strconv.Itoa(c.Port)), c.Database, c.SslMode, c.PoolMaxConns, ) if c.Schema != "" { connectionURI += "&search_path=" + c.Schema } return connectionURI }
ToDBConnectionURI returns a connection URI to be used with the pgx package.
ToDBConnectionURI
go
bruin-data/bruin
pkg/postgres/config.go
https://github.com/bruin-data/bruin/blob/master/pkg/postgres/config.go
Apache-2.0
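As a sketch, the URI produced for sample values (the Config field names come from the code above; the import path is an assumption):

package main

import (
	"fmt"

	"github.com/bruin-data/bruin/pkg/postgres" // assumed import path
)

func main() {
	c := postgres.Config{
		Username:     "jane",
		Password:     "secret", // username and password are path-escaped in the URI
		Host:         "localhost",
		Port:         5432,
		Database:     "analytics",
		SslMode:      "disable",
		PoolMaxConns: 10,
		Schema:       "reporting", // non-empty schema appends search_path
	}
	fmt.Println(c.ToDBConnectionURI())
	// postgres://jane:secret@localhost:5432/analytics?sslmode=disable&pool_max_conns=10&search_path=reporting
}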
func (c RedShiftConfig) ToDBConnectionURI() string { connectionURI := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s&pool_max_conns=%d", url.PathEscape(c.Username), url.PathEscape(c.Password), net.JoinHostPort(c.Host, strconv.Itoa(c.Port)), c.Database, c.SslMode, c.PoolMaxConns, ) if c.Schema != "" { connectionURI += "&search_path=" + c.Schema } return connectionURI }
ToDBConnectionURI returns a connection URI to be used with the pgx package.
ToDBConnectionURI
go
bruin-data/bruin
pkg/postgres/config.go
https://github.com/bruin-data/bruin/blob/master/pkg/postgres/config.go
Apache-2.0
func (c *Client) Select(ctx context.Context, query *query.Query) ([][]interface{}, error) { rows, err := c.connection.Query(ctx, query.String()) if err != nil { return nil, err } defer rows.Close() collectedRows, err := pgx.CollectRows(rows, func(row pgx.CollectableRow) ([]interface{}, error) { return row.Values() }) if err != nil { return nil, errors.Wrap(err, "failed to collect row values") } if len(collectedRows) == 0 { return make([][]interface{}, 0), nil } return collectedRows, nil }
Select runs a query and returns the results.
Select
go
bruin-data/bruin
pkg/postgres/db.go
https://github.com/bruin-data/bruin/blob/master/pkg/postgres/db.go
Apache-2.0
func (c *Client) Ping(ctx context.Context) error { q := query.Query{ Query: "SELECT 1", } err := c.RunQueryWithoutResult(ctx, &q) if err != nil { return errors.Wrap(err, "failed to run test query on Postgres connection") } return nil }
Ping runs a simple query (SELECT 1) to validate the connection.
Ping
go
bruin-data/bruin
pkg/postgres/db.go
https://github.com/bruin-data/bruin/blob/master/pkg/postgres/db.go
Apache-2.0
func GetConfigItems(data []*Config, path string) []ConfigItem { ret := make([]ConfigItem, 0, len(data)) for i, item := range data { ret = append(ret, newConfigItem(item, path, i)) } return ret }
GetConfigItems is used to generate ConfigItems from the given Config entities.
GetConfigItems
go
storyicon/powerproto
pkg/configs/item.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/item.go
Apache-2.0
func (c *configItem) ID() string { return c.id }
ID is used to return the config's unique id.
ID
go
storyicon/powerproto
pkg/configs/item.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/item.go
Apache-2.0
func (c *configItem) Path() string { return c.path }
Path is used to return the config path
Path
go
storyicon/powerproto
pkg/configs/item.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/item.go
Apache-2.0
func (c *configItem) Config() *Config { return c.c }
Config is used to return the Config
Config
go
storyicon/powerproto
pkg/configs/item.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/item.go
Apache-2.0
func SaveConfigs(path string, configs ...*Config) error { parts := make([][]byte, 0, len(configs)) for _, config := range configs { data, err := yaml.Marshal(config) if err != nil { return err } parts = append(parts, data) } data := bytes.Join(parts, []byte("\r\n---\r\n")) if err := ioutil.WriteFile(path, data, fs.ModePerm); err != nil { return err } return nil }
SaveConfigs is used to save the given configs into a single multi-document YAML file.
SaveConfigs
go
storyicon/powerproto
pkg/configs/configs.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/configs.go
Apache-2.0
func LoadConfigs(path string) ([]*Config, error) { raw, err := ioutil.ReadFile(path) if err != nil { return nil, err } items, err := util.SplitYAML(raw) if err != nil { return nil, err } var ret []*Config for _, item := range items { var config Config if err := yaml.Unmarshal(item, &config); err != nil { return nil, err } ret = append(ret, &config) } return ret, nil }
LoadConfigs is used to load configs from the specified path.
LoadConfigs
go
storyicon/powerproto
pkg/configs/configs.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/configs.go
Apache-2.0
func LoadConfigItems(path string) ([]ConfigItem, error) { data, err := LoadConfigs(path) if err != nil { return nil, err } return GetConfigItems(data, path), nil }
LoadConfigItems is similar to LoadConfigs, but returns the ConfigItem abstraction of the Config structures.
LoadConfigItems
go
storyicon/powerproto
pkg/configs/configs.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/configs.go
Apache-2.0
func ListConfigPaths(sourceDir string) []string { var paths []string cur := sourceDir for { filePath := filepath.Join(cur, consts.ConfigFileName) paths = append(paths, filePath) next := filepath.Dir(cur) if next == cur { break } cur = next } paths = append(paths, consts.PathForGlobalConfig()) return paths }
ListConfigPaths is used to list all possible config paths
ListConfigPaths
go
storyicon/powerproto
pkg/configs/configs.go
https://github.com/storyicon/powerproto/blob/master/pkg/configs/configs.go
Apache-2.0
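A sketch of the search order (the import path is assumed; the file name comes from consts.ConfigFileName, so powerproto.yaml below is an assumption): the function walks from the source dir up to the filesystem root, then appends the global config path.

package main

import (
	"fmt"

	"github.com/storyicon/powerproto/pkg/configs" // assumed import path
)

func main() {
	for _, p := range configs.ListConfigPaths("/home/user/project/protos") {
		fmt.Println(p)
	}
	// /home/user/project/protos/powerproto.yaml
	// /home/user/project/powerproto.yaml
	// /home/user/powerproto.yaml
	// /home/powerproto.yaml
	// /powerproto.yaml
	// plus consts.PathForGlobalConfig() as the final fallback
}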
func MatchPath(pattern string, path string) (bool, error) { return doublestar.PathMatch(pattern, path) }
MatchPath is used to match path with specified pattern
MatchPath
go
storyicon/powerproto
pkg/util/file.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/file.go
Apache-2.0
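Since MatchPath delegates to the doublestar library, ** spans directory separators; a minimal sketch (import path assumed):

package main

import (
	"fmt"

	"github.com/storyicon/powerproto/pkg/util" // assumed import path
)

func main() {
	ok, err := util.MatchPath("api/**/*.proto", "api/v1/user/user.proto")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok) // true: ** matches across directory levels
}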
func CopyDirectory(src, dst string) error { return filecopy.Copy(src, dst) }
CopyDirectory is used to copy a directory. If dst already exists, it will be merged.
CopyDirectory
go
storyicon/powerproto
pkg/util/file.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/file.go
Apache-2.0
func CopyFile(src, dst string) error { sourceFileStat, err := os.Stat(src) if err != nil { return err } if !sourceFileStat.Mode().IsRegular() { return errors.Errorf("%s is not a regular file", src) } source, err := os.Open(src) if err != nil { return err } defer source.Close() if err := os.MkdirAll(filepath.Dir(dst), fs.ModePerm); err != nil { return err } destination, err := os.Create(dst) if err != nil { return err } defer destination.Close() _, err = io.Copy(destination, source) return err }
CopyFile is used to copy file from src to dst
CopyFile
go
storyicon/powerproto
pkg/util/file.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/file.go
Apache-2.0
func IsFileExists(path string) (bool, error) { info, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } if info.IsDir() { return false, errors.Errorf("%s is not a file", path) } return true, nil }
IsFileExists is used to check whether the file exists
IsFileExists
go
storyicon/powerproto
pkg/util/file.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/file.go
Apache-2.0
func IsDirExists(path string) (bool, error) { info, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } if !info.IsDir() { return false, errors.Errorf("%s is not a directory", path) } return true, nil }
IsDirExists is used to check whether the dir exists
IsDirExists
go
storyicon/powerproto
pkg/util/file.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/file.go
Apache-2.0
func GetFilesWithExtRecursively(target string, targetExt string) ([]string, error) { var data []string err := filepath.Walk(target, func(path string, info fs.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return nil } ext := filepath.Ext(path) if ext == targetExt { data = append(data, path) } return nil }) return data, err }
GetFilesWithExtRecursively is used to recursively list files with a specific suffix. targetExt should include the leading '.'.
GetFilesWithExtRecursively
go
storyicon/powerproto
pkg/util/file.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/file.go
Apache-2.0
func GetFilesWithExt(dir string, targetExt string) ([]string, error) { children, err := ioutil.ReadDir(dir) if err != nil { return nil, err } var data []string for _, child := range children { if child.IsDir() { continue } if ext := filepath.Ext(child.Name()); ext != targetExt { continue } data = append(data, filepath.Join(dir, child.Name())) } return data, nil }
GetFilesWithExt is used to list files with a specific suffix. targetExt should include the leading '.'.
GetFilesWithExt
go
storyicon/powerproto
pkg/util/file.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/file.go
Apache-2.0
func SplitYAML(data []byte) ([][]byte, error) { decoder := yaml.NewDecoder(bytes.NewReader(data)) var parts [][]byte for { var value interface{} err := decoder.Decode(&value) if err == io.EOF { break } if err != nil { return nil, err } part, err := yaml.Marshal(value) if err != nil { return nil, err } parts = append(parts, part) } return parts, nil }
SplitYAML is used to split a multi-document YAML file into its individual documents.
SplitYAML
go
storyicon/powerproto
pkg/util/yaml.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/yaml.go
Apache-2.0
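A short sketch of SplitYAML on a two-document input (import path assumed):

package main

import (
	"fmt"

	"github.com/storyicon/powerproto/pkg/util" // assumed import path
)

func main() {
	raw := []byte("name: a\n---\nname: b\n")
	parts, err := util.SplitYAML(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(parts)) // 2: one re-marshaled part per YAML document
}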
func DumpYaml(cfg interface{}) { out, err := yaml.Marshal(cfg) if err != nil { fmt.Fprintln(os.Stderr, err) } else { fmt.Printf("%s\n", out) } }
DumpYaml is used to dump yaml into stdout
DumpYaml
go
storyicon/powerproto
pkg/util/yaml.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/yaml.go
Apache-2.0
func LoadConfig(filename string, pointer interface{}) error { buf, err := ioutil.ReadFile(filename) if err != nil { return multierror.Prefix(err, "Error reading config file") } err = yaml.UnmarshalStrict(buf, pointer) if err != nil { return multierror.Prefix(err, "Error parsing config file") } return nil }
LoadConfig reads YAML-formatted config from filename into the given pointer.
LoadConfig
go
storyicon/powerproto
pkg/util/yaml.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/yaml.go
Apache-2.0
func SortSemanticVersion(items []string) ([]string, []string) { versionMap := make(map[*semver.Version]string, len(items)) versions := make(semver.Versions, 0, len(items)) var malformed []string for _, item := range items { s := item if strings.HasPrefix(s, "v") { s = item[1:] } version, err := semver.NewVersion(s) if err != nil { malformed = append(malformed, item) continue } versionMap[version] = item versions = append(versions, version) } sort.Sort(versions) var data []string for _, version := range versions { data = append(data, versionMap[version]) } sort.Strings(malformed) return malformed, data }
SortSemanticVersion is used to sort semantic versions. It returns the malformed items and the sorted valid versions.
SortSemanticVersion
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
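A sketch of the two return values, malformed first, then the sorted versions (import path assumed):

package main

import (
	"fmt"

	"github.com/storyicon/powerproto/pkg/util" // assumed import path
)

func main() {
	malformed, sorted := util.SortSemanticVersion([]string{"v1.10.0", "v1.2.0", "latest", "2.0.0"})
	fmt.Println(malformed) // [latest]: not parseable as semver
	fmt.Println(sorted)    // [v1.2.0 v1.10.0 2.0.0]: numeric, not lexicographic, order
}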
func DeduplicateSliceStably(items []string) []string { data := make([]string, 0, len(items)) deduplicate := map[string]struct{}{} for _, val := range items { if _, exists := deduplicate[val]; !exists { deduplicate[val] = struct{}{} data = append(data, val) } } return data }
DeduplicateSliceStably is used to deduplicate slice items stably
DeduplicateSliceStably
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func ContainsEmpty(items ...string) bool { return Contains(items, "") }
ContainsEmpty is used to check whether items contain an empty string.
ContainsEmpty
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func Contains(items []string, target string) bool { for _, item := range items { if item == target { return true } } return false }
Contains is used to check whether the target is in items
Contains
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func SetToSlice(set map[string]struct{}) []string { data := make([]string, 0, len(set)) for key := range set { data = append(data, key) } return data }
SetToSlice is used to convert set<string> to slice<string>
SetToSlice
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func GetMapKeys(dict map[string]string) []string { data := make([]string, 0, len(dict)) for key := range dict { data = append(data, key) } return data }
GetMapKeys is used to get the keys of map
GetMapKeys
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func GetExitCode(err error) int { if exitErr, ok := err.(*exec.ExitError); ok { // The program has exited with an exit code != 0 // This works on both Unix and Windows. Although package // syscall is generally platform dependent, WaitStatus is // defined for both Unix and Windows and in both cases has // an ExitStatus() method with the same signature. if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { return status.ExitStatus() } } return 1 }
GetExitCode is used to parse exit code from cmd error
GetExitCode
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
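A quick sketch of GetExitCode on a Unix-like system, where the `false` command exits with status 1 (import path assumed):

package main

import (
	"fmt"
	"os/exec"

	"github.com/storyicon/powerproto/pkg/util" // assumed import path
)

func main() {
	err := exec.Command("false").Run() // returns an *exec.ExitError
	fmt.Println(util.GetExitCode(err)) // 1
}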
func IsRegularVersion(s string) bool { return regexpRegularVersion.MatchString(s) }
IsRegularVersion is used to determine whether the version number is a regular version number. Regular means va.b.c, where a, b, and c are all numbers.
IsRegularVersion
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
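The regexpRegularVersion variable is not part of this record; a pattern consistent with the docstring (va.b.c with numeric parts) would look like this sketch, though the repo's actual pattern may differ:

package main

import (
	"fmt"
	"regexp"
)

// Assumed pattern: the real regexpRegularVersion in the repo may differ.
var regexpRegularVersion = regexp.MustCompile(`^v\d+\.\d+\.\d+$`)

func main() {
	fmt.Println(regexpRegularVersion.MatchString("v1.2.3"))    // true
	fmt.Println(regexpRegularVersion.MatchString("v1.2.3-rc")) // false: pre-release suffix is not "regular"
}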
func RenderWithEnv(s string, ext map[string]string) string { matches := regexpEnvironmentVar.FindAllString(s, -1) for _, match := range matches { key := match[1:] val := ext[key] if val == "" { val = os.Getenv(key) } if val != "" { s = strings.ReplaceAll(s, match, val) } } return s }
RenderWithEnv is used to render string with env
RenderWithEnv
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
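Assuming $NAME-style variables (which the match[1:] key extraction above suggests), RenderWithEnv can be used like this sketch, with ext values taking precedence over the process environment (import path assumed):

package main

import (
	"fmt"
	"os"

	"github.com/storyicon/powerproto/pkg/util" // assumed import path
)

func main() {
	os.Setenv("GOPATH", "/home/user/go")
	// NAME comes from ext; GOPATH falls back to the environment.
	out := util.RenderWithEnv("$GOPATH/bin/$NAME", map[string]string{"NAME": "protoc"})
	fmt.Println(out) // /home/user/go/bin/protoc
}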
func RenderPathWithEnv(path string, ext map[string]string) string { return filepath.Clean(RenderWithEnv(path, ext)) }
RenderPathWithEnv is used to render path with environment
RenderPathWithEnv
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func SplitGoPackageVersion(pkg string) (path string, version string, ok bool) { i := strings.Index(pkg, "@") if i == -1 { return "", "", false } return pkg[:i], pkg[i+1:], true }
SplitGoPackageVersion is used to split a Go package identifier into its path and version.
SplitGoPackageVersion
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func JoinGoPackageVersion(path, version string) string { return strings.Join([]string{ path, version, }, "@") }
JoinGoPackageVersion is used to join a Go package path and version.
JoinGoPackageVersion
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
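A round-trip sketch for the two helpers above (import path assumed); note that Split cuts at the first '@':

package main

import (
	"fmt"

	"github.com/storyicon/powerproto/pkg/util" // assumed import path
)

func main() {
	path, version, ok := util.SplitGoPackageVersion("google.golang.org/protobuf@v1.28.0")
	fmt.Println(path, version, ok) // google.golang.org/protobuf v1.28.0 true

	fmt.Println(util.JoinGoPackageVersion(path, version)) // google.golang.org/protobuf@v1.28.0
}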
func GetBinaryFileName(name string) string { if runtime.GOOS == "windows" { if !strings.HasSuffix(name, ".exe") { return name + ".exe" } return name } return name }
GetBinaryFileName is used to get the OS-specific binary file name.
GetBinaryFileName
go
storyicon/powerproto
pkg/util/util.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/util.go
Apache-2.0
func (s *progressBar) SetPrefix(format string, args ...interface{}) { s.prefix = fmt.Sprintf(format, args...) }
SetPrefix is used to set the prefix of progress bar
SetPrefix
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func (s *progressBar) SetSuffix(format string, args ...interface{}) { s.suffix = fmt.Sprintf(format, args...) }
SetSuffix is used to set the suffix of progress bar
SetSuffix
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func (s *progressBar) Incr() { s.bar.Increment() }
Incr is used to increase progress
Incr
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func (s *progressBar) Wait() { s.container.Wait() }
Wait is used to wait for the rendering of the progress bar to complete
Wait
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func (f *fakeProgressbar) Wait() {}
Wait is used to wait for the rendering of the progress bar to complete
Wait
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func (f *fakeProgressbar) SetPrefix(format string, args ...interface{}) { f.prefix = fmt.Sprintf(format, args...) }
SetPrefix is used to set the prefix of the progress bar.
SetPrefix
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func (f *fakeProgressbar) SetSuffix(format string, args ...interface{}) { f.suffix = fmt.Sprintf(format, args...) f.LogInfo(map[string]interface{}{ "progress": fmt.Sprintf("%3.f", float64(f.current)/float64(f.total)*100), "stage": f.prefix, }, f.suffix) }
SetSuffix is used to set the suffix of progress bar
SetSuffix
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func GetProgressBar(ctx context.Context, count int) ProgressBar { if consts.IsDebugMode(ctx) { return newFakeProgressbar(count) } var progressBar *progressBar container := mpb.New() bar := container.Add(int64(count), mpb.NewBarFiller(mpb.BarStyle().Lbound("["). Filler(color.GreenString("=")). Tip(color.GreenString(">")).Padding(" ").Rbound("]")), mpb.PrependDecorators( func() decor.Decorator { frames := getSpinner() var count uint return decor.Any(func(statistics decor.Statistics) string { if statistics.Completed { return frames[0] } frame := frames[count%uint(len(frames))] count++ return frame }) }(), decor.Any(func(statistics decor.Statistics) string { if progressBar != nil { return progressBar.prefix } return "" }), ), mpb.AppendDecorators( decor.NewPercentage("%d "), decor.Any(func(statistics decor.Statistics) string { if progressBar != nil { return fmt.Sprintf("(%d/%d) %s", statistics.Current, count, progressBar.suffix) } return "" }), ), mpb.BarWidth(15), ) progressBar = newEmbedProgressBar(container, bar) return progressBar }
GetProgressBar is used to get progress bar
GetProgressBar
go
storyicon/powerproto
pkg/util/progressbar/progressbar.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/progressbar/progressbar.go
Apache-2.0
func NewConfig() *Config { return &Config{ Pretty: false, Level: LevelDebug, } }
NewConfig is used to init config with default values
NewConfig
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func NewDefault(component string) Logger { logger, err := New(NewConfig(), component, prometheus.DefaultRegisterer) if err != nil { panic(err) } return logger }
NewDefault is used to initialize a simple Logger
NewDefault
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func New(cfg *Config, component string, registerer prometheus.Registerer) (Logger, error) { if cfg == nil { cfg = NewConfig() } service := &BasicLogger{ cfg: cfg, component: component, registerer: registerer, } return service, nil }
New is used to initialize a Logger service.
New
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func (b *BasicLogger) LogDebug(fields map[string]interface{}, format string, args ...interface{}) { b.log(LevelDebug, fields, format, args...) }
LogDebug prints a message with debug level.
LogDebug
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func (b *BasicLogger) LogInfo(fields map[string]interface{}, format string, args ...interface{}) { b.log(LevelInfo, fields, format, args...) }
LogInfo prints a message with info level.
LogInfo
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func (b *BasicLogger) LogWarn(fields map[string]interface{}, format string, args ...interface{}) { b.log(LevelWarn, fields, format, args...) }
LogWarn prints a message with warn level.
LogWarn
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func (b *BasicLogger) LogError(fields map[string]interface{}, format string, args ...interface{}) { b.log(LevelError, fields, format, args...) }
LogError prints a message with error level.
LogError
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func (b *BasicLogger) LogFatal(fields map[string]interface{}, format string, args ...interface{}) { b.log(LevelFatal, fields, format, args...) }
LogFatal prints a message with fatal level.
LogFatal
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func (b *BasicLogger) NewLogger(component string) Logger { name := strings.Join([]string{b.component, component}, ".") logger, err := New(b.cfg, name, b.registerer) if err != nil { b.LogWarn(map[string]interface{}{ "name": name, }, "failed to extend logger: %s", err) return b } return logger }
NewLogger is used to derive a new child Logger
NewLogger
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func (b *BasicLogger) SetLogLevel(level Level) Logger { b.cfg.Level = level return b }
SetLogLevel is used to set log level
SetLogLevel
go
storyicon/powerproto
pkg/util/logger/logger.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/logger/logger.go
Apache-2.0
func Execute(ctx context.Context, log logger.Logger, dir string, name string, arguments []string, env []string) ([]byte, error) { cmd := exec.CommandContext(ctx, name, arguments...) cmd.Env = append(os.Environ(), env...) cmd.Dir = dir if consts.IsDryRun(ctx) && !consts.IsIgnoreDryRun(ctx) { log.LogInfo(map[string]interface{}{ "command": cmd.String(), "dir": cmd.Dir, }, consts.TextDryRun) return nil, nil } var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr log.LogDebug(map[string]interface{}{ "command": cmd.String(), "dir": cmd.Dir, }, consts.TextExecuteCommand) if err := cmd.Run(); err != nil { return nil, &ErrCommandExec{ Err: err, Dir: cmd.Dir, Command: cmd.String(), ExitCode: util.GetExitCode(err), Stdout: stdout.String(), Stderr: stderr.String(), } } return stdout.Bytes(), nil }
Execute is used to execute commands, returning stdout and any execution error.
Execute
go
storyicon/powerproto
pkg/util/command/command.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/command/command.go
Apache-2.0
func (err *ErrCommandExec) Error() string { return fmt.Sprintf("failed to execute %s in %s, stderr: %s, exit code %d, %s", err.Command, err.Dir, err.Stderr, err.ExitCode, err.Err, ) }
Error implements the error interface
Error
go
storyicon/powerproto
pkg/util/command/command.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/command/command.go
Apache-2.0
func (s *Buffer) Write(p []byte) (n int, err error) { s.mutex.Lock() defer s.mutex.Unlock() return s.writer.Write(p) }
Write implements the io.Writer interface.
Write
go
storyicon/powerproto
pkg/util/concurrent/buffer.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/concurrent/buffer.go
Apache-2.0
func NewBuffer(writer io.Writer) io.Writer { return &Buffer{writer: writer} }
NewBuffer is used to create a new buffer
NewBuffer
go
storyicon/powerproto
pkg/util/concurrent/buffer.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/concurrent/buffer.go
Apache-2.0
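A minimal sketch of using the concurrent-safe buffer from several goroutines (import path assumed); each Write call is serialized by the mutex:

package main

import (
	"fmt"
	"os"
	"sync"

	"github.com/storyicon/powerproto/pkg/util/concurrent" // assumed import path
)

func main() {
	w := concurrent.NewBuffer(os.Stdout) // wraps os.Stdout with a mutex
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fmt.Fprintf(w, "line %d\n", n) // one Write per Fprintf, so lines never interleave
		}(i)
	}
	wg.Wait()
}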
func NewErrGroup(ctx context.Context, concurrency int) *ErrGroup { ctx, cancel := context.WithCancel(ctx) return &ErrGroup{ ctx: ctx, cancel: cancel, limit: make(chan struct{}, concurrency), } }
NewErrGroup is used to create a new ErrGroup
NewErrGroup
go
storyicon/powerproto
pkg/util/concurrent/errgroup.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/concurrent/errgroup.go
Apache-2.0
func (g *ErrGroup) Wait() error { g.wg.Wait() g.cancel() return g.err }
Wait is used to wait for the ErrGroup to finish.
Wait
go
storyicon/powerproto
pkg/util/concurrent/errgroup.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/concurrent/errgroup.go
Apache-2.0
func (g *ErrGroup) Go(f func(ctx context.Context) error) { if g.err != nil { return } g.limit <- struct{}{} g.wg.Add(1) go func() { defer func() { <-g.limit g.wg.Done() }() if err := f(g.ctx); err != nil { g.cancel() g.errOnce.Do(func() { g.err = err g.cancel() }) } }() }
Go is used to start a new goroutine
Go
go
storyicon/powerproto
pkg/util/concurrent/errgroup.go
https://github.com/storyicon/powerproto/blob/master/pkg/util/concurrent/errgroup.go
Apache-2.0
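A minimal usage sketch for ErrGroup (import path assumed): concurrency is capped by the channel-based limit, and the first error cancels the shared context for the remaining tasks.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/storyicon/powerproto/pkg/util/concurrent" // assumed import path
)

func main() {
	g := concurrent.NewErrGroup(context.Background(), 2) // at most 2 tasks run at once
	for i := 0; i < 5; i++ {
		n := i
		g.Go(func(ctx context.Context) error {
			if n == 3 {
				return errors.New("task 3 failed") // first error is kept and cancels ctx
			}
			return nil
		})
	}
	fmt.Println(g.Wait()) // task 3 failed
}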
func GetHomeDir() string { return homeDir }
GetHomeDir is used to get cached homeDir
GetHomeDir
go
storyicon/powerproto
pkg/consts/consts.go
https://github.com/storyicon/powerproto/blob/master/pkg/consts/consts.go
Apache-2.0
func PathForGlobalConfig() string { return filepath.Join(GetHomeDir(), ConfigFileName) }
PathForGlobalConfig is used to get path of global config
PathForGlobalConfig
go
storyicon/powerproto
pkg/consts/consts.go
https://github.com/storyicon/powerproto/blob/master/pkg/consts/consts.go
Apache-2.0
func WithPerCommandTimeout(ctx context.Context, timeout time.Duration) context.Context { return context.WithValue(ctx, perCommandTimeout{}, timeout) }
WithPerCommandTimeout is used to inject per command timeout
WithPerCommandTimeout
go
storyicon/powerproto
pkg/consts/context.go
https://github.com/storyicon/powerproto/blob/master/pkg/consts/context.go
Apache-2.0
func WithDebugMode(ctx context.Context) context.Context { return context.WithValue(ctx, debugMode{}, "true") }
WithDebugMode is used to set debug mode
WithDebugMode
go
storyicon/powerproto
pkg/consts/context.go
https://github.com/storyicon/powerproto/blob/master/pkg/consts/context.go
Apache-2.0
func WithIgnoreDryRun(ctx context.Context) context.Context { return context.WithValue(ctx, ignoreDryRun{}, "true") }
WithIgnoreDryRun is used to inject ignore dryRun flag into context
WithIgnoreDryRun
go
storyicon/powerproto
pkg/consts/context.go
https://github.com/storyicon/powerproto/blob/master/pkg/consts/context.go
Apache-2.0
func WithDisableAction(ctx context.Context) context.Context { return context.WithValue(ctx, disableAction{}, "true") }
WithDisableAction is used to disable post action/shell
WithDisableAction
go
storyicon/powerproto
pkg/consts/context.go
https://github.com/storyicon/powerproto/blob/master/pkg/consts/context.go
Apache-2.0
func WithDryRun(ctx context.Context) context.Context { ctx = WithDebugMode(ctx) return context.WithValue(ctx, dryRun{}, "true") }
WithDryRun is used to inject dryRun flag into context
WithDryRun
go
storyicon/powerproto
pkg/consts/context.go
https://github.com/storyicon/powerproto/blob/master/pkg/consts/context.go
Apache-2.0