code
stringlengths 11
335k
| docstring
stringlengths 20
11.8k
| func_name
stringlengths 1
100
| language
stringclasses 1
value | repo
stringclasses 245
values | path
stringlengths 4
144
| url
stringlengths 43
214
| license
stringclasses 4
values |
---|---|---|---|---|---|---|---|
// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new VulnerabilityReportData. A nil receiver yields nil, so the
// call is safe to chain on possibly-nil pointers.
func (in *VulnerabilityReportData) DeepCopy() *VulnerabilityReportData {
	if in == nil {
		return nil
	}
	// Allocate a fresh value and delegate the field-wise copy to DeepCopyInto.
	out := new(VulnerabilityReportData)
	in.DeepCopyInto(out)
	return out
} | DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VulnerabilityReportData. | DeepCopy | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | Apache-2.0 |
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil.
func (in *VulnerabilityReportList) DeepCopyInto(out *VulnerabilityReportList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items is a slice, so the shallow *out = *in above aliased it; replace it
	// with a freshly allocated slice and deep-copy each element.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]VulnerabilityReport, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
} | DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | DeepCopyInto | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | Apache-2.0 |
// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new VulnerabilityReportList. Returns nil for a nil receiver.
func (in *VulnerabilityReportList) DeepCopy() *VulnerabilityReportList {
	if in == nil {
		return nil
	}
	out := new(VulnerabilityReportList)
	in.DeepCopyInto(out)
	return out
} | DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VulnerabilityReportList. | DeepCopy | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | Apache-2.0 |
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver,
// creating a new runtime.Object. The nil check avoids returning an interface
// that wraps a typed nil pointer.
func (in *VulnerabilityReportList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
} | DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. | DeepCopyObject | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | Apache-2.0 |
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
// writing into out. in must be non-nil. A plain value assignment suffices
// because the visible copy involves no pointer, slice, or map fields.
func (in *VulnerabilitySummary) DeepCopyInto(out *VulnerabilitySummary) {
	*out = *in
} | DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | DeepCopyInto | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | Apache-2.0 |
// DeepCopy is an autogenerated deepcopy function, copying the receiver,
// creating a new VulnerabilitySummary. Returns nil for a nil receiver.
func (in *VulnerabilitySummary) DeepCopy() *VulnerabilitySummary {
	if in == nil {
		return nil
	}
	out := new(VulnerabilitySummary)
	in.DeepCopyInto(out)
	return out
} | DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VulnerabilitySummary. | DeepCopy | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/zz_generated.deepcopy.go | Apache-2.0 |
// addKnownTypes registers the list of known report types with the Scheme
// under SchemeGroupVersion, then adds the group version to the scheme's
// meta types. It always returns nil.
func addKnownTypes(scheme *runtime.Scheme) error {
	// Keep the registered types in one slice so the registration call reads
	// as a single statement.
	knownTypes := []runtime.Object{
		&VulnerabilityReport{},
		&VulnerabilityReportList{},
		&ConfigAuditReport{},
		&ConfigAuditReportList{},
		&ClusterConfigAuditReport{},
		&ClusterConfigAuditReportList{},
		&ClusterComplianceReport{},
		&ClusterComplianceReportList{},
		&ExposedSecretReport{},
		&ExposedSecretReportList{},
		&RbacAssessmentReport{},
		&RbacAssessmentReportList{},
		&ClusterRbacAssessmentReport{},
		&ClusterRbacAssessmentReportList{},
		&InfraAssessmentReport{},
		&InfraAssessmentReportList{},
		&ClusterInfraAssessmentReport{},
		&ClusterInfraAssessmentReportList{},
		&SbomReport{},
		&SbomReportList{},
		&ClusterSbomReport{},
		&ClusterSbomReportList{},
		&ClusterVulnerabilityReport{},
		&ClusterVulnerabilityReportList{},
	}
	scheme.AddKnownTypes(SchemeGroupVersion, knownTypes...)
	meta.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
} | Adds the list of known types to Scheme. | addKnownTypes | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/register.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/register.go | Apache-2.0 |
// StringToSeverity returns the Severity constant matching the given name,
// compared case-insensitively. An unrecognized name yields an error.
func StringToSeverity(name string) (Severity, error) {
	switch upper := strings.ToUpper(name); upper {
	case "CRITICAL", "HIGH", "MEDIUM", "LOW", "NONE", "UNKNOWN":
		return Severity(upper), nil
	}
	return "", fmt.Errorf("unrecognized name literal: %s", name)
} | StringToSeverity returns the enum constant of Severity with the specified
name. The name must match exactly an identifier used to declare an enum
constant. (Extraneous whitespace characters are not permitted.) | StringToSeverity | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/common_types.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/common_types.go | Apache-2.0 |
// ToComplianceSpec maps the CRD compliance spec to the trivy compliance spec,
// converting each control and its checks field by field.
func ToComplianceSpec(cSpec Compliance) spec.ComplianceSpec {
	controls := make([]defsecTypes.Control, 0, len(cSpec.Controls))
	for _, c := range cSpec.Controls {
		// Convert the control's check list first, then the control itself.
		checks := make([]defsecTypes.SpecCheck, 0, len(c.Checks))
		for _, check := range c.Checks {
			checks = append(checks, defsecTypes.SpecCheck{ID: check.ID})
		}
		controls = append(controls, defsecTypes.Control{
			ID:            c.ID,
			Name:          c.Name,
			Description:   c.Description,
			Checks:        checks,
			Severity:      defsecTypes.Severity(c.Severity),
			DefaultStatus: defsecTypes.ControlStatus(c.DefaultStatus),
		})
	}
	return spec.ComplianceSpec{
		Spec: defsecTypes.Spec{
			ID:               cSpec.ID,
			Title:            cSpec.Title,
			Description:      cSpec.Description,
			Version:          cSpec.Version,
			RelatedResources: cSpec.RelatedResources,
			Controls:         controls,
		},
	}
} | ToComplianceSpec map data from crd compliance spec to trivy compliance spec | ToComplianceSpec | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/compliance_types.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/compliance_types.go | Apache-2.0 |
// FromSummaryReport maps data from the trivy summary report to the CRD
// summary report, copying each per-control summary entry.
//
// Fix: the loop variable previously shadowed the parameter `sr`, which made
// the trailing `sr.ID`/`sr.Title` reads easy to misread; the loop variable is
// now named `control`.
func FromSummaryReport(sr *report.SummaryReport) *SummaryReport {
	summaryControls := make([]ControlCheckSummary, 0, len(sr.SummaryControls))
	for _, control := range sr.SummaryControls {
		summaryControls = append(summaryControls, ControlCheckSummary{
			ID:        control.ID,
			Name:      control.Name,
			Severity:  control.Severity,
			TotalFail: control.TotalFail,
		})
	}
	return &SummaryReport{
		ID:              sr.ID,
		Title:           sr.Title,
		SummaryControls: summaryControls,
	}
} | FromSummaryReport map data from trivy summary report to crd summary report | FromSummaryReport | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/compliance_types.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/compliance_types.go | Apache-2.0 |
// FromDetailReport maps data from the trivy detail compliance report to the
// CRD compliance report. Every misconfiguration found for a control becomes a
// failing ComplianceCheck; a control with no misconfigurations is recorded
// with a single passing placeholder check.
//
// Fix: the nested loop variables previously shadowed the parameter `sr` twice
// over; they are now named `result` and `target` to keep the scopes distinct.
func FromDetailReport(sr *report.ComplianceReport) *ComplianceReport {
	controlResults := make([]*ControlCheckResult, 0, len(sr.Results))
	for _, result := range sr.Results {
		checks := make([]ComplianceCheck, 0)
		for _, target := range result.Results {
			for _, ms := range target.Misconfigurations {
				checks = append(checks, ComplianceCheck{
					ID:          ms.AVDID,
					Target:      target.Target,
					Title:       ms.Title,
					Description: ms.Description,
					Severity:    Severity(ms.Severity),
					Category:    "Kubernetes Security Check",
					Remediation: ms.Resolution,
					Messages:    []string{ms.Message},
					Success:     false,
				})
			}
		}
		// Mark the control as passed if no misconfiguration issue was found.
		if len(checks) == 0 {
			checks = append(checks, ComplianceCheck{
				Success: true,
			})
		}
		controlResults = append(controlResults, &ControlCheckResult{
			ID:            result.ID,
			Name:          result.Name,
			Severity:      result.Severity,
			Description:   result.Description,
			DefaultStatus: result.DefaultStatus,
			Checks:        checks,
		})
	}
	return &ComplianceReport{
		ID:               sr.ID,
		Title:            sr.Title,
		Version:          sr.Version,
		Description:      sr.Description,
		RelatedResources: sr.RelatedResources,
		Results:          controlResults,
	}
} | FromDetailReport map data from trivy summary report to crd summary report | FromDetailReport | go | aquasecurity/trivy-operator | pkg/apis/aquasecurity/v1alpha1/compliance_types.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/apis/aquasecurity/v1alpha1/compliance_types.go | Apache-2.0 |
// NewReadWriter constructs a ReadWriter backed by the controller-runtime
// client held by the given ObjectResolver.
func NewReadWriter(objectResolver *kube.ObjectResolver) ReadWriter {
	rw := readWriter{ObjectResolver: objectResolver}
	return &rw
} | NewReadWriter constructs a new ReadWriter which is using the client package
provided by the controller-runtime libraries for interacting with the
Kubernetes API server. | NewReadWriter | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/io.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/io.go | Apache-2.0 |
// SortDesc stably sorts the given reports in descending order according to
// the LessFunc functions passed to OrderedBy.
func (ms *multiSorter) SortDesc(reports []v1alpha1.VulnerabilityReport) {
	ms.reports = reports
	descending := sort.Reverse(ms)
	sort.Stable(descending)
} | SortDesc sorts the argument slice according to the LessFunc functions passed to OrderedBy. | SortDesc | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/sort.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/sort.go | Apache-2.0 |
// OrderedBy returns a multiSorter that compares reports using the given
// LessFunc functions in order; call one of its Sort methods to sort.
func OrderedBy(less ...LessFunc) *multiSorter {
	ms := multiSorter{less: less}
	return &ms
} | OrderedBy returns a Sorter that sorts using the LessFunc functions, in order.
Call its Sort method to sort the data. | OrderedBy | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/sort.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/sort.go | Apache-2.0 |
// Len is part of sort.Interface; reports the number of reports to sort.
func (ms *multiSorter) Len() int {
	return len(ms.reports)
} | Len is part of sort.Interface. | Len | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/sort.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/sort.go | Apache-2.0 |
// Swap is part of sort.Interface; exchanges the reports at indices i and j.
func (ms *multiSorter) Swap(i, j int) {
	ms.reports[i], ms.reports[j] = ms.reports[j], ms.reports[i]
} | Swap is part of sort.Interface. | Swap | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/sort.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/sort.go | Apache-2.0 |
// Less is part of sort.Interface. It applies the comparison functions in
// order until one discriminates between the two reports; ties on all but the
// last comparator are broken by the last one. Note each non-final comparator
// may be invoked twice per call.
func (ms *multiSorter) Less(i, j int) bool {
	p, q := &ms.reports[i], &ms.reports[j]
	last := len(ms.less) - 1
	for _, cmp := range ms.less[:last] {
		if cmp(p, q) {
			// p sorts before q — decided.
			return true
		}
		if cmp(q, p) {
			// q sorts before p — decided.
			return false
		}
		// Equal under this comparator; fall through to the next one.
	}
	// Everything tied so far: the final comparator decides.
	return ms.less[last](p, q)
} | Less is part of sort.Interface. It is implemented by looping along the
less functions until it finds a comparison that discriminates between
the two items (one is less than the other). Note that it can call the
less functions twice per call. We could change the functions to return
-1, 0, 1 and reduce the number of calls for greater efficiency: an
exercise for the reader. | Less | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/sort.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/sort.go | Apache-2.0 |
// updateScanJobForWorkloadNamespace adjusts the scan job when running scan
// jobs in the workload namespace is enabled: the job (and its secrets) move
// to the workload's namespace and the job reuses the workload's service
// account and image pull secrets. No-op when the feature is disabled.
func (s *ScanJobBuilder) updateScanJobForWorkloadNamespace(job *batchv1.Job, podspec corev1.PodSpec, secrets []*corev1.Secret) {
	if !s.pluginContext.GetTrivyOperatorConfig().VulnerabilityScanJobsInSameNamespace() {
		return
	}
	workloadNamespace := s.object.GetNamespace()
	job.Namespace = workloadNamespace
	job.Spec.Template.Spec.ServiceAccountName = podspec.ServiceAccountName
	job.Spec.Template.Spec.ImagePullSecrets = podspec.ImagePullSecrets
	for _, secret := range secrets {
		secret.Namespace = workloadNamespace
	}
} | When run scan job in workload namespace is enabled then this method will update scanjob spec with these changes
- namespace same as workload
- service account same as workload service account
- ImagePullSecret same as workload imagePullSecret | updateScanJobForWorkloadNamespace | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/builder.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/builder.go | Apache-2.0 |
// TrivyServerAvaliable reports whether the Trivy server is available. It
// first consults the cache; on a cache miss it probes the server directly and
// treats a failed probe as "not available" (the probe error is swallowed,
// only unexpected cache errors are returned).
func (r *trivyServerHealthChecker) TrivyServerAvaliable(serverURL string) (bool, error) {
	cached, err := r.serverUpFromCache()
	if err == nil {
		return cached, nil
	}
	if !errors.Is(err, gcache.KeyNotFoundError) {
		// Unexpected cache failure — surface it.
		return false, err
	}
	// Cache miss: probe the server; a probe failure means "down", not an error.
	if probeErr := r.checkServerUp(serverURL); probeErr != nil {
		return false, nil
	}
	return true, nil
} | TrivyServerAvaliable check if trivy server is available
first check the cache; if not found, check server health via an HTTP call to the healthz API and update the cache | TrivyServerAvaliable | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/controller/trivyserver.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/controller/trivyserver.go | Apache-2.0 |
// ServerHealthy probes the Trivy server's 'healthz' endpoint over HTTP and
// returns nil only on a 200 OK response.
// NOTE(review): uses the default HTTP client, which has no timeout — confirm
// callers bound this call elsewhere.
func (r *httpChecker) ServerHealthy(serverURL string) error {
	healthzURL := fmt.Sprintf("%s/%s", strings.TrimSuffix(serverURL, "/"), "healthz")
	resp, err := http.Get(healthzURL)
	if err != nil {
		return err
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusOK {
		return errors.New("trivy server is not up and running")
	}
	return nil
} | ServerHealthy make http call the trivy server 'healthz' api for it availability | ServerHealthy | go | aquasecurity/trivy-operator | pkg/vulnerabilityreport/controller/trivyserver.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/vulnerabilityreport/controller/trivyserver.go | Apache-2.0 |
// NewReadWriter constructs a ReadWriter backed by the controller-runtime
// client held by the given ObjectResolver.
func NewReadWriter(objectResolver *kube.ObjectResolver) ReadWriter {
	return &readWriter{ObjectResolver: objectResolver}
} | NewReadWriter constructs a new ReadWriter which is using the client package
provided by the controller-runtime libraries for interacting with the
Kubernetes API server. | NewReadWriter | go | aquasecurity/trivy-operator | pkg/rbacassessment/io.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/rbacassessment/io.go | Apache-2.0 |
// GetContainerImagesFromPodSpec returns a map of container names to container
// images from the specified v1.PodSpec. Init containers are included unless
// skipInitContainers is true; ephemeral containers are always included.
func GetContainerImagesFromPodSpec(spec corev1.PodSpec, skipInitContainers bool) ContainerImages {
	images := ContainerImages{}
	// Pre-size the combined slice — the final length is known up front, so
	// there is no need to let append regrow it.
	capacity := len(spec.Containers)
	if !skipInitContainers {
		capacity += len(spec.InitContainers)
	}
	containers := make([]corev1.Container, 0, capacity)
	containers = append(containers, spec.Containers...)
	if !skipInitContainers {
		containers = append(containers, spec.InitContainers...)
	}
	for _, container := range containers {
		images[container.Name] = container.Image
	}
	// Ephemeral containers are not the same type as Containers/InitContainers,
	// so they are added in a separate loop.
	for _, c := range spec.EphemeralContainers {
		images[c.Name] = c.Image
	}
	return images
} | GetContainerImagesFromPodSpec returns a map of container names
to container images from the specified v1.PodSpec. | GetContainerImagesFromPodSpec | go | aquasecurity/trivy-operator | pkg/kube/resources.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/resources.go | Apache-2.0 |
// GetContainerImagesFromContainersList returns a map of container names to
// container images from the specified corev1.Container slice.
func GetContainerImagesFromContainersList(containers []corev1.Container) ContainerImages {
	// Pre-size the map: one entry per container (fewer if names repeat).
	images := make(ContainerImages, len(containers))
	for _, container := range containers {
		images[container.Name] = container.Image
	}
	return images
} | GetContainerImagesFromContainersList returns a map of container names
to container images from the specified corev1.Container array. | GetContainerImagesFromContainersList | go | aquasecurity/trivy-operator | pkg/kube/resources.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/resources.go | Apache-2.0 |
// GetContainerImagesFromJob returns a map of container names to container
// images for the given Job, filtered down to the listed completed containers.
// The mapping is read from the AnnotationContainerImages annotation, which
// holds it as a JSON value; a missing or unparsable annotation is an error.
func GetContainerImagesFromJob(job *batchv1.Job, completedContainers ...string) (ContainerImages, error) {
	containerImagesAsJSON, ok := job.Annotations[trivyoperator.AnnotationContainerImages]
	if !ok {
		return nil, fmt.Errorf("required annotation not set: %s", trivyoperator.AnnotationContainerImages)
	}
	containerImages := ContainerImages{}
	if err := containerImages.FromJSON(containerImagesAsJSON); err != nil {
		return nil, fmt.Errorf("parsing annotation: %s: %w", trivyoperator.AnnotationContainerImages, err)
	}
	// Keep only the containers the caller reports as completed.
	completed := make(map[string]string)
	for _, name := range completedContainers {
		if image, found := containerImages[name]; found {
			completed[name] = image
		}
	}
	return completed, nil
} | GetContainerImagesFromJob returns a map of container names
to container images from the specified v1.Job.
The mapping is encoded as JSON value of the AnnotationContainerImages
annotation. | GetContainerImagesFromJob | go | aquasecurity/trivy-operator | pkg/kube/resources.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/resources.go | Apache-2.0 |
// ComputeHash returns a hash value calculated from the given object via
// DeepHashObject over an FNV-1a hasher. The decimal sum is safe-encoded to
// avoid bad words in the resulting string.
func ComputeHash(obj any) string {
	hasher := fnv.New32a()
	DeepHashObject(hasher, obj)
	sum := fmt.Sprint(hasher.Sum32())
	return rand.SafeEncodeString(sum)
} | ComputeHash returns a hash value calculated from a given object.
The hash will be safe encoded to avoid bad words. | ComputeHash | go | aquasecurity/trivy-operator | pkg/kube/resources.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/resources.go | Apache-2.0 |
// DeepHashObject resets the hasher and writes the given object into it using
// the spew library, which follows pointers and prints the actual values of
// nested objects so the hash does not change when only a pointer changes.
func DeepHashObject(hasher hash.Hash, objectToWrite any) {
	hasher.Reset()
	// DisableMethods + SpewKeys make the dump structural rather than relying
	// on Stringer/error implementations.
	dumper := spew.ConfigState{
		Indent:         " ",
		SortKeys:       true,
		DisableMethods: true,
		SpewKeys:       true,
	}
	_, _ = dumper.Fprintf(hasher, "%#v", objectToWrite)
} | DeepHashObject writes specified object to hash using the spew library
which follows pointers and prints actual values of the nested objects
ensuring the hash does not change when a pointer changes. | DeepHashObject | go | aquasecurity/trivy-operator | pkg/kube/resources.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/resources.go | Apache-2.0 |
// AppendResourceLabels copies each configured label name that exists on the
// resource into the report labels map (mutated in place).
func AppendResourceLabels(configLabelsNames []string, resourceLabels, reportLabels map[string]string) {
	for _, name := range configLabelsNames {
		value, found := resourceLabels[name]
		if found {
			reportLabels[name] = value
		}
	}
} | AppendResourceLabels match resource labels by config and append it to report labels | AppendResourceLabels | go | aquasecurity/trivy-operator | pkg/kube/labels.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/labels.go | Apache-2.0 |
// AppendCustomLabels merges the configured custom labels into the report
// labels map (mutated in place); existing keys are overwritten.
func AppendCustomLabels(configCustomLabels, reportLabels map[string]string) {
	for k, v := range configCustomLabels {
		reportLabels[k] = v
	}
} | AppendCustomLabels append custom labels to report | AppendCustomLabels | go | aquasecurity/trivy-operator | pkg/kube/labels.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/labels.go | Apache-2.0 |
// MapContainerNamesToDockerAuths creates the mapping from a container name to
// the Docker authentication credentials for the specified kube.ContainerImages
// and image pull Secrets. Both exact registry-server matches and wildcard
// (subdomain) matches against the auth map are considered.
func MapContainerNamesToDockerAuths(images ContainerImages, auths map[string]docker.Auth) (map[string]docker.Auth, error) {
	wildcardServers := GetWildcardServers(auths)
	mapping := make(map[string]docker.Auth)
	for containerName, imageRef := range images {
		server, err := docker.GetServerFromImageRef(imageRef)
		if err != nil {
			return nil, err
		}
		if auth, found := auths[server]; found {
			mapping[containerName] = auth
		}
		if len(wildcardServers) == 0 {
			continue
		}
		// NOTE(review): a wildcard match found below overrides an exact match
		// recorded above — confirm this precedence is intended.
		if wildcardDomain := matchSubDomain(wildcardServers, server); wildcardDomain != "" {
			if auth, found := auths[wildcardDomain]; found {
				mapping[containerName] = auth
			}
		}
	}
	return mapping, nil
} | MapContainerNamesToDockerAuths creates the mapping from a container name to the Docker authentication
credentials for the specified kube.ContainerImages and image pull Secrets. | MapContainerNamesToDockerAuths | go | aquasecurity/trivy-operator | pkg/kube/secrets.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/secrets.go | Apache-2.0 |
// MapDockerRegistryServersToAuths creates the mapping from a Docker registry
// server to the Docker authentication credentials for the given image pull
// Secrets. Only kubernetes.io/dockerconfigjson and kubernetes.io/dockercfg
// secrets are considered; secrets missing the required data key are skipped.
// When multiSecretSupport is enabled, credentials from multiple secrets for
// the same server are joined into comma-separated username/password lists.
func MapDockerRegistryServersToAuths(imagePullSecrets []corev1.Secret, multiSecretSupport bool) (map[string]docker.Auth, error) {
	auths := make(map[string]docker.Auth)
	for _, secret := range imagePullSecrets {
		var (
			data            []byte
			hasRequiredData bool
			isLegacy        bool
		)
		switch secret.Type {
		case corev1.SecretTypeDockerConfigJson:
			data, hasRequiredData = secret.Data[corev1.DockerConfigJsonKey]
		case corev1.SecretTypeDockercfg:
			data, hasRequiredData = secret.Data[corev1.DockerConfigKey]
			isLegacy = true
		default:
			// Not a Docker credentials secret.
			continue
		}
		// Skip secrets of a Docker type that nevertheless lack the required
		// ".dockerconfigjson" / ".dockercfg" key.
		if !hasRequiredData {
			continue
		}
		dockerConfig := &docker.Config{}
		if err := dockerConfig.Read(data, isLegacy); err != nil {
			return nil, fmt.Errorf("reading %s or %s field of %q secret: %w", corev1.DockerConfigJsonKey, corev1.DockerConfigKey, secret.Namespace+"/"+secret.Name, err)
		}
		for authKey, auth := range dockerConfig.Auths {
			server, err := docker.GetServerFromDockerAuthKey(authKey)
			if err != nil {
				return nil, err
			}
			existing, seen := auths[server]
			if multiSecretSupport && seen {
				// Accumulate credentials from multiple secrets for one server.
				auths[server] = docker.Auth{
					Username: existing.Username + "," + auth.Username,
					Password: existing.Password + "," + auth.Password,
				}
			} else {
				auths[server] = auth
			}
		}
	}
	return auths, nil
} | MapDockerRegistryServersToAuths creates the mapping from a Docker registry server
to the Docker authentication credentials for the specified slice of image pull Secrets. | MapDockerRegistryServersToAuths | go | aquasecurity/trivy-operator | pkg/kube/secrets.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/secrets.go | Apache-2.0 |
// NewSecretsReader constructs a SecretsReader backed by the given
// controller-runtime client.
func NewSecretsReader(c client.Client) SecretsReader {
	reader := secretsReader{client: c}
	return &reader
} | NewSecretsReader constructs a new SecretsReader which is using the client
package provided by the controller-runtime libraries for interacting with
the Kubernetes API server. | NewSecretsReader | go | aquasecurity/trivy-operator | pkg/kube/secrets.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/secrets.go | Apache-2.0 |
// IsBuiltInWorkload returns true if the specified v1.OwnerReference refers to
// a built-in Kubernetes workload kind, false otherwise (including nil).
func IsBuiltInWorkload(controller *metav1.OwnerReference) bool {
	if controller == nil {
		return false
	}
	switch controller.Kind {
	case string(KindReplicaSet),
		string(KindReplicationController),
		string(KindStatefulSet),
		string(KindDaemonSet),
		string(KindJob):
		return true
	}
	return false
} | IsBuiltInWorkload returns true if the specified v1.OwnerReference
is a built-in Kubernetes workload, false otherwise. | IsBuiltInWorkload | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// IsWorkload returns true if the specified resource kind represents a
// Kubernetes workload, false otherwise.
func IsWorkload(kind string) bool {
	switch kind {
	case "Pod", "Deployment", "ReplicaSet", "ReplicationController",
		"StatefulSet", "DaemonSet", "Job", "CronJob":
		return true
	}
	return false
} | IsWorkload returns true if the specified resource kinds represents Kubernetes
workload, false otherwise. | IsWorkload | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// IsClusterScopedKind returns true if the specified kind is ClusterRole,
// ClusterRoleBinding, CustomResourceDefinition, or Node.
// TODO Use discovery client to have a generic implementation.
func IsClusterScopedKind(kind string) bool {
	return kind == string(KindClusterRole) ||
		kind == string(KindClusterRoleBindings) ||
		kind == string(KindCustomResourceDefinition) ||
		kind == string(KindNode)
} | IsClusterScopedKind returns true if the specified kind is ClusterRole,
ClusterRoleBinding, and CustomResourceDefinition.
TODO Use discovery client to have a generic implementation. | IsClusterScopedKind | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// ObjectRefToLabels encodes the specified ObjectRef as a set of labels. If
// the object's name is not a valid label value, a hash of the name is stored
// under the resource-name-hash label instead.
func ObjectRefToLabels(obj ObjectRef) map[string]string {
	labels := map[string]string{
		trivyoperator.LabelResourceKind:      string(obj.Kind),
		trivyoperator.LabelResourceNamespace: obj.Namespace,
	}
	if errs := validation.IsValidLabelValue(obj.Name); len(errs) > 0 {
		// Name cannot be used verbatim as a label value — fall back to a hash.
		labels[trivyoperator.LabelResourceNameHash] = ComputeHash(obj.Name)
	} else {
		labels[trivyoperator.LabelResourceName] = obj.Name
	}
	return labels
} | ObjectRefToLabels encodes the specified ObjectRef as a set of labels.
If Object's name cannot be used as the value of the
trivy-operator.LabelResourceName label, as a fallback, this method will calculate
a hash of the Object's name and use it as the value of the
trivy-operator.LabelResourceNameHash label. | ObjectRefToLabels | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// ObjectToObjectMeta encodes the specified client.Object as a set of labels
// (kind, namespace, and name or name-hash) added to the given ObjectMeta.
// When the name is not a valid label value, its hash goes into a label and
// the raw name is preserved in an annotation. Always returns nil.
func ObjectToObjectMeta(obj client.Object, objectMeta *metav1.ObjectMeta) error {
	if objectMeta.Labels == nil {
		objectMeta.Labels = make(map[string]string)
	}
	name := obj.GetName()
	objectMeta.Labels[trivyoperator.LabelResourceKind] = obj.GetObjectKind().GroupVersionKind().Kind
	objectMeta.Labels[trivyoperator.LabelResourceNamespace] = obj.GetNamespace()
	if len(validation.IsValidLabelValue(name)) > 0 {
		// Invalid as a label value: store the hash as a label and keep the
		// raw name in an annotation.
		objectMeta.Labels[trivyoperator.LabelResourceNameHash] = ComputeHash(name)
		if objectMeta.Annotations == nil {
			objectMeta.Annotations = make(map[string]string)
		}
		objectMeta.Annotations[trivyoperator.LabelResourceName] = name
		return nil
	}
	objectMeta.Labels[trivyoperator.LabelResourceName] = name
	return nil
} | ObjectToObjectMeta encodes the specified client.Object as a set of labels
and annotations added to the given ObjectMeta. | ObjectToObjectMeta | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// ComputeSpecHash computes a hash of the specified K8s client.Object. The
// hash is used to indicate whether the client.Object should be rescanned or
// not by adding it as the trivy-operator.LabelResourceSpecHash label to an
// instance of a security report.
//
// Workload kinds hash only their pod (template) spec, so metadata-only edits
// do not force a rescan; all other supported kinds hash the whole object.
// Unsupported kinds yield an error.
//
// Fix: the twelve duplicate cases that all returned ComputeHash(obj) are
// consolidated into a single multi-type case.
func ComputeSpecHash(obj client.Object) (string, error) {
	switch t := obj.(type) {
	case *corev1.Pod, *appsv1.Deployment, *appsv1.ReplicaSet, *corev1.ReplicationController,
		*appsv1.StatefulSet, *appsv1.DaemonSet, *batchv1.CronJob, *batchv1beta1.CronJob, *batchv1.Job:
		spec, err := GetPodSpec(obj)
		if err != nil {
			return "", err
		}
		return ComputeHash(spec), nil
	case *corev1.Service, *corev1.ConfigMap, *corev1.ResourceQuota, *corev1.Node, *corev1.LimitRange,
		*rbacv1.Role, *rbacv1.RoleBinding, *rbacv1.ClusterRole, *rbacv1.ClusterRoleBinding,
		*networkingv1.NetworkPolicy, *networkingv1.Ingress,
		*apiextensionsv1.CustomResourceDefinition:
		// All remaining supported kinds are hashed in full.
		return ComputeHash(obj), nil
	default:
		return "", fmt.Errorf("computing spec hash of unsupported object: %T", t)
	}
} | ComputeSpecHash computes hash of the specified K8s client.Object. The hash is
used to indicate whether the client.Object should be rescanned or not by
adding it as the trivy-operator.LabelResourceSpecHash label to an instance of a
security report. | ComputeSpecHash | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// GetPodSpec returns the v1.PodSpec embedded in the specified Kubernetes
// client.Object. Returns an error if the given client.Object is not a
// supported Kubernetes workload.
//
// Fix: each branch previously re-asserted obj (e.g. obj.(*corev1.Pod))
// instead of using the value already bound by the type switch; the redundant
// assertions are removed.
func GetPodSpec(obj client.Object) (corev1.PodSpec, error) {
	switch t := obj.(type) {
	case *corev1.Pod:
		return t.Spec, nil
	case *appsv1.Deployment:
		return t.Spec.Template.Spec, nil
	case *appsv1.ReplicaSet:
		return t.Spec.Template.Spec, nil
	case *corev1.ReplicationController:
		return t.Spec.Template.Spec, nil
	case *appsv1.StatefulSet:
		return t.Spec.Template.Spec, nil
	case *appsv1.DaemonSet:
		return t.Spec.Template.Spec, nil
	case *batchv1beta1.CronJob:
		return t.Spec.JobTemplate.Spec.Template.Spec, nil
	case *batchv1.CronJob:
		return t.Spec.JobTemplate.Spec.Template.Spec, nil
	case *batchv1.Job:
		return t.Spec.Template.Spec, nil
	default:
		return corev1.PodSpec{}, fmt.Errorf("unsupported workload: %T", t)
	}
} | GetPodSpec returns v1.PodSpec from the specified Kubernetes client.Object.
Returns error if the given client.Object is not a Kubernetes workload. | GetPodSpec | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// InitCompatibleMgr initializes a CompatibleObjectMapper that stores, per
// supported kind, the compatible K8s object identifier (group/api/kind). The
// mapping is discovered dynamically from the cluster's REST mapper so the
// operator can work with both old and new API resources depending on the
// cluster version.
func InitCompatibleMgr() (CompatibleMgr, error) {
	restMapper, err := genericclioptions.NewConfigFlags(true).ToRESTMapper()
	if err != nil {
		return nil, err
	}
	kindObjectMap := make(map[string]string)
	for _, resource := range getCompatibleResources() {
		gvk, kindErr := restMapper.KindFor(schema.GroupVersionResource{Resource: resource})
		if kindErr != nil {
			return nil, kindErr
		}
		kindObjectMap[gvk.Kind] = gvk.String()
	}
	return &CompatibleObjectMapper{kindObjectMap: kindObjectMap}, nil
} | InitCompatibleMgr initializes a CompatibleObjectMapper who store a map the of supported kinds with it compatible Objects (group/api/kind)
it dynamically fetches the compatible k8s objects (group/api/kind) by resource from the cluster and store it in kind vs k8s object mapping
It will enable the operator to support old and new API resources based on cluster version support | InitCompatibleMgr | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// supportedObjectsByK8sKind returns a fresh client.Object for the given API
// identifier, or nil when the identifier is not supported.
func supportedObjectsByK8sKind(api string) client.Object {
	if api == apiBatchV1beta1CronJob {
		return &batchv1beta1.CronJob{}
	}
	if api == apiBatchV1CronJob {
		return &batchv1.CronJob{}
	}
	return nil
} | return a map of supported object api per k8s version | supportedObjectsByK8sKind | go | aquasecurity/trivy-operator | pkg/kube/object.go | https://github.com/aquasecurity/trivy-operator/blob/master/pkg/kube/object.go | Apache-2.0 |
// main is the entrypoint of the Trivy Operator executable command: it loads
// the operator configuration, wires up the logger, and runs the operator,
// exiting non-zero on any failure.
func main() {
	// Fetch operator configuration early.
	operatorConfig, err := etc.GetOperatorConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "error getting operator config: %v\n", err)
		os.Exit(1)
	}
	// Initialize the logger based on the LogDevMode from the config.
	log.SetLogger(zap.New(zap.UseDevMode(operatorConfig.LogDevMode)))
	err = run(operatorConfig)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to run trivy operator: %v\n", err)
		os.Exit(1)
	}
} | main is the entrypoint of the Trivy Operator executable command. | main | go | aquasecurity/trivy-operator | cmd/trivy-operator/main.go | https://github.com/aquasecurity/trivy-operator/blob/master/cmd/trivy-operator/main.go | Apache-2.0 |
// VulnerabilityScannerBehavior returns the Ginkgo container of specs that
// describe the expected behavior of a vulnerability scanner, exercised
// against the cluster accessible through the given inputs.
func VulnerabilityScannerBehavior(inputs *Inputs) func() {
	return func() {
		// An unmanaged Pod (no controller owner) should be scanned directly.
		Context("When unmanaged Pod is created", func() {
			var ctx context.Context
			var pod *corev1.Pod
			BeforeEach(func() {
				ctx = context.Background()
				pod = helper.NewPod().
					WithRandomName("unmanaged-vuln-image").
					WithNamespace(inputs.PrimaryNamespace).
					WithContainer("vuln-image", "mirror.gcr.io/knqyf263/vuln-image:1.2.3", []string{"/bin/sh", "-c", "--"}, []string{"while true; do sleep 30; done;"}).
					Build()
				err := inputs.Create(ctx, pod)
				Expect(err).ToNot(HaveOccurred())
			})
			It("Should create VulnerabilityReport", func() {
				Eventually(inputs.HasVulnerabilityReportOwnedBy(pod), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, pod)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// For a Deployment, the report is owned by its active ReplicaSet.
		Context("When Deployment is created", func() {
			var ctx context.Context
			var deploy *appsv1.Deployment
			BeforeEach(func() {
				ctx = context.Background()
				deploy = helper.NewDeployment().
					WithRandomName(inputs.PrimaryWorkloadPrefix).
					WithNamespace(inputs.PrimaryNamespace).
					WithContainer("wordpress", "wordpress:4.9").
					Build()
				err := inputs.Create(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
				Eventually(inputs.HasActiveReplicaSet(inputs.PrimaryNamespace, deploy.Name), inputs.AssertTimeout).Should(BeTrue())
			})
			It("Should create VulnerabilityReport", func() {
				rs, err := inputs.GetActiveReplicaSetForDeployment(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Expect(rs).ToNot(BeNil())
				Eventually(inputs.HasVulnerabilityReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// A rolling update creates a new ReplicaSet; a new report must follow it.
		Context("When Deployment is rolling updated", func() {
			var ctx context.Context
			var deploy *appsv1.Deployment
			BeforeEach(func() {
				By("Creating Deployment wordpress")
				ctx = context.Background()
				deploy = helper.NewDeployment().
					WithRandomName(inputs.PrimaryWorkloadPrefix).
					WithNamespace(inputs.PrimaryNamespace).
					WithContainer("wordpress", "wordpress:4.9").
					Build()
				err := inputs.Create(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
				Eventually(inputs.HasActiveReplicaSet(inputs.PrimaryNamespace, deploy.Name), inputs.AssertTimeout).Should(BeTrue())
			})
			It("Should create VulnerabilityReport for new ReplicaSet", func() {
				By("Getting current active ReplicaSet")
				rs, err := inputs.GetActiveReplicaSetForDeployment(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Expect(rs).ToNot(BeNil())
				By("Waiting for VulnerabilityReport")
				Eventually(inputs.HasVulnerabilityReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
				By("Updating deployment image to wordpress:6.7")
				err = inputs.UpdateDeploymentImage(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Eventually(inputs.HasActiveReplicaSet(inputs.PrimaryNamespace, deploy.Name), inputs.AssertTimeout).Should(BeTrue())
				By("Getting new active replicaset")
				rs, err = inputs.GetActiveReplicaSetForDeployment(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Expect(rs).ToNot(BeNil())
				By("Waiting for new VulnerabilityReport")
				Eventually(inputs.HasVulnerabilityReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// CronJobs are scanned as the owner of the report themselves.
		Context("When CronJob is created", func() {
			var ctx context.Context
			var cronJob *batchv1.CronJob
			BeforeEach(func() {
				ctx = context.Background()
				cronJob = &batchv1.CronJob{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: inputs.PrimaryNamespace,
						Name:      "hello-" + rand.String(5),
					},
					Spec: batchv1.CronJobSpec{
						Schedule: "*/1 * * * *",
						JobTemplate: batchv1.JobTemplateSpec{
							Spec: batchv1.JobSpec{
								Template: corev1.PodTemplateSpec{
									Spec: corev1.PodSpec{
										RestartPolicy: corev1.RestartPolicyOnFailure,
										Containers: []corev1.Container{
											{
												Name:  "hello",
												Image: "busybox",
												Command: []string{
													"/bin/sh",
													"-c",
													"date; echo Hello from the Kubernetes cluster",
												},
											},
										},
									},
								},
							},
						},
					},
				}
				err := inputs.Create(ctx, cronJob)
				Expect(err).ToNot(HaveOccurred())
			})
			It("Should create VulnerabilityReport", func() {
				Eventually(inputs.HasVulnerabilityReportOwnedBy(cronJob), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, cronJob)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// TODO Add scenario to test that VulnerabilityReport is recreated
		// TODO Add scenario for workload with multiple containers
		// TODO Add scenario for ReplicaSet
		// TODO Add scenario for StatefulSet
		// TODO Add scenario for DaemonSet
	}
} | VulnerabilityScannerBehavior returns the container of specs that describe behavior
of a vulnerability scanner with the given inputs. | VulnerabilityScannerBehavior | go | aquasecurity/trivy-operator | tests/itest/trivy-operator/behavior/behavior.go | https://github.com/aquasecurity/trivy-operator/blob/master/tests/itest/trivy-operator/behavior/behavior.go | Apache-2.0 |
// ConfigurationCheckerBehavior returns the Ginkgo container of specs that
// describe the expected behavior of a configuration checker (config audit),
// exercised against the cluster accessible through the given inputs.
func ConfigurationCheckerBehavior(inputs *Inputs) func() {
	return func() {
		// An unmanaged Pod (no controller owner) should be audited directly.
		Context("When unmanaged Pod is created", func() {
			var ctx context.Context
			var pod *corev1.Pod
			BeforeEach(func() {
				ctx = context.Background()
				pod = helper.NewPod().
					WithRandomName("unmanaged-vuln-image").
					WithNamespace(inputs.PrimaryNamespace).
					WithContainer("vuln-image", "mirror.gcr.io/knqyf263/vuln-image:1.2.3", []string{"/bin/sh", "-c", "--"}, []string{"while true; do sleep 30; done;"}).
					Build()
				err := inputs.Create(ctx, pod)
				Expect(err).ToNot(HaveOccurred())
			})
			It("Should create ConfigAuditReport", func() {
				Eventually(inputs.HasConfigAuditReportOwnedBy(pod), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, pod)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// For a Deployment, the report is owned by its active ReplicaSet.
		Context("When Deployment is created", func() {
			var ctx context.Context
			var deploy *appsv1.Deployment
			BeforeEach(func() {
				ctx = context.Background()
				deploy = helper.NewDeployment().
					WithRandomName(inputs.PrimaryWorkloadPrefix).
					WithNamespace(inputs.PrimaryNamespace).
					WithContainer("wordpress", "wordpress:4.9").
					Build()
				err := inputs.Create(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
				Eventually(inputs.HasActiveReplicaSet(inputs.PrimaryNamespace, deploy.Name), inputs.AssertTimeout).Should(BeTrue())
			})
			It("Should create ConfigAuditReport", func() {
				rs, err := inputs.GetActiveReplicaSetForDeployment(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Expect(rs).ToNot(BeNil())
				Eventually(inputs.HasConfigAuditReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// A rolling update creates a new ReplicaSet; a new report must follow it.
		Context("When Deployment is rolling updated", func() {
			var ctx context.Context
			var deploy *appsv1.Deployment
			BeforeEach(func() {
				By("Creating Deployment wordpress")
				ctx = context.Background()
				deploy = helper.NewDeployment().
					WithRandomName(inputs.PrimaryWorkloadPrefix).
					WithNamespace(inputs.PrimaryNamespace).
					WithContainer("wordpress", "wordpress:4.9").
					Build()
				err := inputs.Create(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
				Eventually(inputs.HasActiveReplicaSet(inputs.PrimaryNamespace, deploy.Name), inputs.AssertTimeout).Should(BeTrue())
			})
			It("Should create ConfigAuditReport for new ReplicaSet", func() {
				By("Getting current active ReplicaSet")
				rs, err := inputs.GetActiveReplicaSetForDeployment(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Expect(rs).ToNot(BeNil())
				By("Waiting for ConfigAuditReport")
				Eventually(inputs.HasConfigAuditReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
				By("Updating deployment image to wordpress:6.7")
				err = inputs.UpdateDeploymentImage(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Eventually(inputs.HasActiveReplicaSet(inputs.PrimaryNamespace, deploy.Name), inputs.AssertTimeout).Should(BeTrue())
				By("Getting new active replicaset")
				rs, err = inputs.GetActiveReplicaSetForDeployment(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Expect(rs).ToNot(BeNil())
				By("Waiting for new Config Audit Report")
				Eventually(inputs.HasConfigAuditReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// CronJobs are audited as the owner of the report themselves.
		Context("When CronJob is created", func() {
			var ctx context.Context
			var cronJob *batchv1.CronJob
			BeforeEach(func() {
				ctx = context.Background()
				cronJob = &batchv1.CronJob{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: inputs.PrimaryNamespace,
						Name:      "hello-" + rand.String(5),
					},
					Spec: batchv1.CronJobSpec{
						Schedule: "*/1 * * * *",
						JobTemplate: batchv1.JobTemplateSpec{
							Spec: batchv1.JobSpec{
								Template: corev1.PodTemplateSpec{
									Spec: corev1.PodSpec{
										RestartPolicy: corev1.RestartPolicyOnFailure,
										Containers: []corev1.Container{
											{
												Name:  "hello",
												Image: "busybox",
												Command: []string{
													"/bin/sh",
													"-c",
													"date; echo Hello from the Kubernetes cluster",
												},
											},
										},
									},
								},
							},
						},
					},
				}
				err := inputs.Create(ctx, cronJob)
				Expect(err).ToNot(HaveOccurred())
			})
			It("Should create ConfigAuditReport", func() {
				Eventually(inputs.HasConfigAuditReportOwnedBy(cronJob), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, cronJob)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// Deleting a report must trigger a rescan of the owning workload.
		Context("When ConfigAuditReport is deleted", func() {
			var ctx context.Context
			var deploy *appsv1.Deployment
			BeforeEach(func() {
				By("Creating Deployment")
				ctx = context.Background()
				deploy = helper.NewDeployment().
					WithRandomName(inputs.PrimaryWorkloadPrefix).
					WithNamespace(inputs.PrimaryNamespace).
					WithContainer("wordpress", "wordpress:4.9").
					Build()
				err := inputs.Create(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
				Eventually(inputs.HasActiveReplicaSet(inputs.PrimaryNamespace, deploy.Name), inputs.AssertTimeout).Should(BeTrue())
			})
			It("Should rescan Deployment when ConfigAuditReport is deleted", func() {
				By("Getting active ReplicaSet")
				rs, err := inputs.GetActiveReplicaSetForDeployment(inputs.PrimaryNamespace, deploy.Name)
				Expect(err).ToNot(HaveOccurred())
				Expect(rs).ToNot(BeNil())
				By("Waiting for ConfigAuditReport")
				Eventually(inputs.HasConfigAuditReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
				By("Deleting ConfigAuditReport")
				err = inputs.DeleteConfigAuditReportOwnedBy(rs)
				Expect(err).ToNot(HaveOccurred())
				By("Waiting for new ConfigAuditReport")
				Eventually(inputs.HasConfigAuditReportOwnedBy(rs), inputs.AssertTimeout).Should(BeTrue())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, deploy)
				Expect(err).ToNot(HaveOccurred())
			})
		})
		// TODO Add scenario for workload with multiple containers
		// TODO Add scenario for ReplicaSet
		// TODO Add scenario for StatefulSet
		// TODO Add scenario for DaemonSet
		// NOTE(review): this container declares no It(...) spec, so its
		// BeforeEach/AfterEach never run under Ginkgo. Confirm whether an
		// assertion on a Service ConfigAuditReport was intended here.
		Context("When Service is created", func() {
			var ctx context.Context
			var svc *corev1.Service
			BeforeEach(func() {
				ctx = context.Background()
				svc = &corev1.Service{
					ObjectMeta: metav1.ObjectMeta{
						Namespace: inputs.PrimaryNamespace,
						Name:      "nginx-" + rand.String(5),
					},
					Spec: corev1.ServiceSpec{
						Selector: map[string]string{
							"app": "nginx",
						},
						Ports: []corev1.ServicePort{
							{
								Port:       80,
								TargetPort: intstr.FromInt(80),
								Protocol:   corev1.ProtocolTCP,
							},
						},
					},
				}
				err := inputs.Create(ctx, svc)
				Expect(err).ToNot(HaveOccurred())
			})
			AfterEach(func() {
				err := inputs.Delete(ctx, svc)
				Expect(err).ToNot(HaveOccurred())
			})
		})
	}
} | ConfigurationCheckerBehavior returns the container of specs that describe behavior
of a configuration checker with the given inputs. | ConfigurationCheckerBehavior | go | aquasecurity/trivy-operator | tests/itest/trivy-operator/behavior/behavior.go | https://github.com/aquasecurity/trivy-operator/blob/master/tests/itest/trivy-operator/behavior/behavior.go | Apache-2.0 |
// IsVulnerabilityReportForContainerOwnedBy returns a Gomega matcher that
// succeeds when a v1alpha1.VulnerabilityReport has a valid structure,
// corresponds to the given container and is owned by the specified object.
// Note: not suitable for unit tests — it does not strictly validate the report.
func IsVulnerabilityReportForContainerOwnedBy(containerName string, owner client.Object) types.GomegaMatcher {
	matcher := vulnerabilityReportMatcher{
		owner:         owner,
		containerName: containerName,
		scheme:        trivyoperator.NewScheme(),
	}
	return &matcher
} | IsVulnerabilityReportForContainerOwnedBy succeeds if a v1alpha1.VulnerabilityReport has a valid structure,
corresponds to the given container and is owned by the specified client.Object.
Note: This matcher is not suitable for unit tests because it does not perform a strict validation
of the actual v1alpha1.VulnerabilityReport. | IsVulnerabilityReportForContainerOwnedBy | go | aquasecurity/trivy-operator | tests/itest/matcher/matcher.go | https://github.com/aquasecurity/trivy-operator/blob/master/tests/itest/matcher/matcher.go | Apache-2.0 |
// IsConfigAuditReportOwnedBy returns a Gomega matcher that succeeds when a
// v1alpha1.ConfigAuditReport has a valid structure and is owned by the given
// object. Note: not suitable for unit tests — validation is not strict.
func IsConfigAuditReportOwnedBy(owner client.Object) types.GomegaMatcher {
	matcher := configAuditReportMatcher{owner: owner}
	return &matcher
} | IsConfigAuditReportOwnedBy succeeds if a v1alpha1.ConfigAuditReport has a valid structure,
and is owned by the specified client.Object.
Note: This matcher is not suitable for unit tests because it does not perform a strict validation
of the actual v1alpha1.ConfigAuditReport. | IsConfigAuditReportOwnedBy | go | aquasecurity/trivy-operator | tests/itest/matcher/matcher.go | https://github.com/aquasecurity/trivy-operator/blob/master/tests/itest/matcher/matcher.go | Apache-2.0 |
// item2Builder returns a pointer to a fresh, zero-valued item2.
// Used as the object factory when loading a queue segment from disk.
func item2Builder() interface{} {
	var it item2
	return &it
} | item2Builder creates a new item and returns a pointer to it.
This is used when we load a segment of the queue from disk. | item2Builder | go | joncrlsn/dque | queue_test.go | https://github.com/joncrlsn/dque/blob/master/queue_test.go | MIT |
// assert fails the test immediately when condition is false, printing the
// caller's file:line and the formatted message in red before calling FailNow.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if condition {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	args := append([]interface{}{filepath.Base(file), line}, v...)
	fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", args...)
	tb.FailNow()
}
// item1Builder returns a pointer to a fresh, zero-valued item1.
// Used as the object factory when loading a queue from disk.
func item1Builder() interface{} {
	var it item1
	return &it
} | item1Builder creates a new item and returns a pointer to it.
This is used when we load a queue from disk. | item1Builder | go | joncrlsn/dque | segment_test.go | https://github.com/joncrlsn/dque/blob/master/segment_test.go | MIT |
// assert aborts the test when condition is false, reporting the caller's
// file:line together with the formatted message (in red) via FailNow.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if condition {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	prefixed := append([]interface{}{filepath.Base(file), line}, v...)
	fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", prefixed...)
	tb.FailNow()
}
// ItemBuilder returns a pointer to a fresh, zero-valued Item.
// Used as the object factory when loading a queue segment from disk.
func ItemBuilder() interface{} {
	var it Item
	return &it
} | ItemBuilder creates a new item and returns a pointer to it.
This is used when we load a segment of the queue from disk. | ItemBuilder | go | joncrlsn/dque | example_test.go | https://github.com/joncrlsn/dque/blob/master/example_test.go | MIT |
// item3Builder returns a pointer to a fresh, zero-valued item3.
// Used as the object factory when loading a queue segment from disk.
func item3Builder() interface{} {
	var it item3
	return &it
} | item3Builder creates a new item and returns a pointer to it.
This is used when we load a segment of the queue from disk. | item3Builder | go | joncrlsn/dque | benchmark_test.go | https://github.com/joncrlsn/dque/blob/master/benchmark_test.go | MIT |
// New creates a new durable queue rooted at dirPath/name.
//
// It validates the arguments, creates the queue directory (which must not
// already exist — use Open or NewOrOpen for an existing queue), acquires the
// queue's file lock, and loads the initial segment state.
func New(name string, dirPath string, itemsPerSegment int, builder func() interface{}) (*DQue, error) {
	// Validation
	if len(name) == 0 {
		return nil, errors.New("the queue name requires a value")
	}
	if len(dirPath) == 0 {
		return nil, errors.New("the queue directory requires a value")
	}
	if !dirExists(dirPath) {
		return nil, errors.New("the given queue directory is not valid: " + dirPath)
	}
	// NOTE(review): path.Join (slash-separated) is used throughout this file;
	// filepath.Join would be required for correct Windows paths — confirm the
	// supported platforms before changing.
	fullPath := path.Join(dirPath, name)
	if dirExists(fullPath) {
		return nil, errors.New("the given queue directory already exists: " + fullPath + ". Use Open instead")
	}
	if err := os.Mkdir(fullPath, 0755); err != nil {
		return nil, errors.Wrap(err, "error creating queue directory "+fullPath)
	}
	q := DQue{Name: name, DirPath: dirPath}
	q.fullPath = fullPath
	q.config.ItemsPerSegment = itemsPerSegment
	q.builder = builder
	q.emptyCond = sync.NewCond(&q.mutex)
	if err := q.lock(); err != nil {
		return nil, err
	}
	if err := q.load(); err != nil {
		// Best-effort unlock: the load error is the root cause and must not be
		// masked by a secondary unlock failure (previously a failing Unlock
		// replaced the load error, hiding why the queue failed to open).
		_ = q.fileLock.Unlock()
		return nil, err
	}
	return &q, nil
} | New creates a new durable queue | New | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Open opens an existing durable queue at dirPath/name.
//
// It validates the arguments, requires the queue directory to already exist
// (use New or NewOrOpen otherwise), acquires the queue's file lock, and loads
// the persisted segment state.
func Open(name string, dirPath string, itemsPerSegment int, builder func() interface{}) (*DQue, error) {
	// Validation
	if len(name) == 0 {
		return nil, errors.New("the queue name requires a value")
	}
	if len(dirPath) == 0 {
		return nil, errors.New("the queue directory requires a value")
	}
	if !dirExists(dirPath) {
		return nil, errors.New("the given queue directory is not valid (" + dirPath + ")")
	}
	fullPath := path.Join(dirPath, name)
	if !dirExists(fullPath) {
		return nil, errors.New("the given queue does not exist (" + fullPath + ")")
	}
	q := DQue{Name: name, DirPath: dirPath}
	q.fullPath = fullPath
	q.config.ItemsPerSegment = itemsPerSegment
	q.builder = builder
	q.emptyCond = sync.NewCond(&q.mutex)
	if err := q.lock(); err != nil {
		return nil, err
	}
	if err := q.load(); err != nil {
		// Best-effort unlock: return the load error (the root cause) rather
		// than letting a secondary unlock failure mask it, as the previous
		// code did.
		_ = q.fileLock.Unlock()
		return nil, err
	}
	return &q, nil
} | Open opens an existing durable queue. | Open | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// NewOrOpen opens the durable queue at dirPath/name if it already exists,
// otherwise it creates a new one. Argument validation mirrors New/Open.
func NewOrOpen(name string, dirPath string, itemsPerSegment int, builder func() interface{}) (*DQue, error) {
	switch {
	case len(name) == 0:
		return nil, errors.New("the queue name requires a value")
	case len(dirPath) == 0:
		return nil, errors.New("the queue directory requires a value")
	case !dirExists(dirPath):
		return nil, errors.New("the given queue directory is not valid (" + dirPath + ")")
	}
	if dirExists(path.Join(dirPath, name)) {
		return Open(name, dirPath, itemsPerSegment, builder)
	}
	return New(name, dirPath, itemsPerSegment, builder)
} | NewOrOpen either creates a new queue or opens an existing durable queue. | NewOrOpen | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Close releases the queue's file lock, rendering this instance unusable.
// It wakes any goroutines blocked in DequeueBlock/PeekBlock (they will then
// observe ErrQueueClosed) and closes the open segment file handles.
// Calling Close a second time returns ErrQueueClosed.
func (q *DQue) Close() error {
	// only allow Close while no other function is active
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.fileLock == nil {
		return ErrQueueClosed
	}
	err := q.fileLock.Close()
	if err != nil {
		return err
	}
	// Finally mark this instance as closed to prevent any further access
	q.fileLock = nil
	// Wake-up any waiting goroutines for blocking queue access - they should get a ErrQueueClosed
	q.emptyCond.Broadcast()
	// Close the first and last segments' file handles
	// (intermediate segments are only opened on demand, so these two are the
	// only handles held open).
	if err = q.firstSegment.close(); err != nil {
		return err
	}
	if q.firstSegment != q.lastSegment {
		if err = q.lastSegment.close(); err != nil {
			return err
		}
	}
	// Safe-guard ourself from accidentally using segments after closing the queue
	q.firstSegment = nil
	q.lastSegment = nil
	return nil
} | Close releases the lock on the queue rendering it unusable for further usage by this instance.
Close will return an error if it has already been called. | Close | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Enqueue appends obj to the end of the queue, rolling over to a new segment
// file when the current last segment reaches ItemsPerSegment records on disk.
// Returns ErrQueueClosed after Close has been called.
func (q *DQue) Enqueue(obj interface{}) error {
	// This is heavy-handed but its safe
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.fileLock == nil {
		return ErrQueueClosed
	}
	// If this segment is full then create a new one
	// (sizeOnDisk counts live items plus removal tombstones, which is what
	// bounds the file's growth).
	if q.lastSegment.sizeOnDisk() >= q.config.ItemsPerSegment {
		// We have filled our last segment to capacity, so create a new one
		seg, err := newQueueSegment(q.fullPath, q.lastSegment.number+1, q.turbo, q.builder)
		if err != nil {
			return errors.Wrapf(err, "error creating new queue segment: %d.", q.lastSegment.number+1)
		}
		// If the last segment is not the first segment
		// then we need to close the file.
		if q.firstSegment != q.lastSegment {
			var err = q.lastSegment.close()
			if err != nil {
				return errors.Wrapf(err, "error closing previous segment file #%d.", q.lastSegment.number)
			}
		}
		// Replace the last segment with the new one
		q.lastSegment = seg
	}
	// Add the object to the last segment
	if err := q.lastSegment.add(obj); err != nil {
		return errors.Wrap(err, "error adding item to the last segment")
	}
	// Wakeup any goroutine that is currently waiting for an item to be enqueued
	q.emptyCond.Broadcast()
	return nil
} | Enqueue adds an item to the end of the queue | Enqueue | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Dequeue removes and returns the first item in the queue.
// When the queue is empty, nil and dque.ErrEmpty are returned.
func (q *DQue) Dequeue() (interface{}, error) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	item, err := q.dequeueLocked()
	return item, err
} | Dequeue removes and returns the first item in the queue.
When the queue is empty, nil and dque.ErrEmpty are returned. | Dequeue | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Peek returns the first item in the queue without dequeueing it.
// When the queue is empty, nil and dque.ErrEmpty are returned.
// Do not use this method with multiple dequeueing threads or you may regret it.
func (q *DQue) Peek() (interface{}, error) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	item, err := q.peekLocked()
	return item, err
} | Peek returns the first item in the queue without dequeueing it.
When the queue is empty, nil and dque.ErrEmpty are returned.
Do not use this method with multiple dequeueing threads or you may regret it. | Peek | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// DequeueBlock behaves like Dequeue, but blocks until an item is available.
func (q *DQue) DequeueBlock() (interface{}, error) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	for {
		obj, err := q.dequeueLocked()
		switch {
		case err == nil:
			return obj, nil
		case err == ErrEmpty:
			// Wait atomically releases the mutex and suspends this goroutine.
			// A wakeup does not guarantee an item is available, so loop and
			// re-check.
			q.emptyCond.Wait()
		default:
			return nil, err
		}
	}
} | DequeueBlock behaves similar to Dequeue, but is a blocking call until an item is available. | DequeueBlock | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// PeekBlock behaves like Peek, but blocks until an item is available.
func (q *DQue) PeekBlock() (interface{}, error) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	for {
		obj, err := q.peekLocked()
		switch {
		case err == nil:
			return obj, nil
		case err == ErrEmpty:
			// Wait atomically releases the mutex and suspends this goroutine.
			// A wakeup does not guarantee an item is available, so loop and
			// re-check.
			q.emptyCond.Wait()
		default:
			return nil, err
		}
	}
} | PeekBlock behaves similar to Peek, but is a blocking call until an item is available. | PeekBlock | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Size locks the queue while calculating so you are guaranteed an accurate
// size... unless you have changed the itemsPerSegment value since the queue
// was last empty. Then it could be wildly inaccurate.
// Returns 0 after the queue has been closed.
func (q *DQue) Size() int {
	// Take the mutex before inspecting any state: q.fileLock is written under
	// the mutex by Close(), so the previous unsynchronized pre-check here was
	// a data race. SizeUnsafe performs the closed-queue check (returning 0)
	// once the lock is held.
	q.mutex.Lock()
	defer q.mutex.Unlock()
	return q.SizeUnsafe()
} | Size locks things up while calculating so you are guaranteed an accurate
size... unless you have changed the itemsPerSegment value since the queue
was last empty. Then it could be wildly inaccurate. | Size | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// SizeUnsafe returns the approximate number of items in the queue without
// taking the mutex. Use Size() when an exact answer matters; this value can
// be stale, and is wrong if ItemsPerSegment changed since the queue was last
// empty. Returns 0 for a closed queue.
func (q *DQue) SizeUnsafe() int {
	if q.fileLock == nil {
		return 0
	}
	first, last := q.firstSegment, q.lastSegment
	if first.number == last.number {
		return first.size()
	}
	// Segments strictly between first and last are full by construction.
	fullSegments := last.number - first.number - 1
	return first.size() + fullSegments*q.config.ItemsPerSegment + last.size()
} | SizeUnsafe returns the approximate number of items in the queue. Use Size() if
having the exact size is important to your use-case.
The return value could be wildly inaccurate if the itemsPerSegment value has
changed since the queue was last empty.
Also, because this method is not synchronized, the size may change after
entering this method. | SizeUnsafe | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// SegmentNumbers reports the numbers of the first and last segments.
// Primarily useful for testing; returns (0, 0) for a closed queue.
func (q *DQue) SegmentNumbers() (int, int) {
	if q.fileLock == nil {
		return 0, 0
	}
	first, last := q.firstSegment.number, q.lastSegment.number
	return first, last
} | SegmentNumbers returns the number of both the first and last segment.
There is likely no use for this information other than testing. | SegmentNumbers | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Turbo returns true if the turbo flag is on. Having turbo on speeds things
// up significantly.
// NOTE(review): q.turbo is read here without holding q.mutex, while
// TurboOn/TurboOff write it under the mutex — treat the result as advisory.
func (q *DQue) Turbo() bool {
	return q.turbo
} | Turbo returns true if the turbo flag is on. Having turbo on speeds things
up significantly. | Turbo | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// TurboOn lets the filesystem decide when to sync file changes to disk.
// Throughput increases greatly, at some risk of data loss on power failure.
// Returns ErrQueueClosed on a closed queue, or an error if turbo is already on.
func (q *DQue) TurboOn() error {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	switch {
	case q.fileLock == nil:
		return ErrQueueClosed
	case q.turbo:
		return errors.New("DQue.TurboOn() is not valid when turbo is on")
	}
	q.turbo = true
	q.firstSegment.turboOn()
	q.lastSegment.turboOn()
	return nil
} | TurboOn allows the filesystem to decide when to sync file changes to disk.
Throughput is greatly increased by turning turbo on, however there is some
risk of losing data if a power-loss occurs.
If turbo is already on an error is returned | TurboOn | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// TurboOff re-enables the "safety" mode that syncs every file change to disk
// as it happens. Returns ErrQueueClosed on a closed queue, or an error if
// turbo is already off.
func (q *DQue) TurboOff() error {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	switch {
	case q.fileLock == nil:
		return ErrQueueClosed
	case !q.turbo:
		return errors.New("DQue.TurboOff() is not valid when turbo is off")
	}
	// Flush both open segments before flipping the flag so no pending write
	// is left unsynced.
	if err := q.firstSegment.turboOff(); err != nil {
		return err
	}
	if err := q.lastSegment.turboOff(); err != nil {
		return err
	}
	q.turbo = false
	return nil
} | TurboOff re-enables the "safety" mode that syncs every file change to disk as
they happen.
If turbo is already off an error is returned | TurboOff | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// TurboSync fsyncs pending changes to disk, but only while turbo is on.
// Returns ErrQueueClosed on a closed queue, or an error if turbo is off.
func (q *DQue) TurboSync() error {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	switch {
	case q.fileLock == nil:
		return ErrQueueClosed
	case !q.turbo:
		return errors.New("DQue.TurboSync() is inappropriate when turbo is off")
	}
	if err := q.firstSegment.turboSync(); err != nil {
		return errors.Wrap(err, "unable to sync changes to disk")
	}
	if err := q.lastSegment.turboSync(); err != nil {
		return errors.Wrap(err, "unable to sync changes to disk")
	}
	return nil
} | TurboSync allows you to fsync changes to disk, but only if turbo is on.
If turbo is off an error is returned | TurboSync | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// load populates the queue's first/last segment state from the files found in
// the queue directory, skipping (and deleting) leading segments that are both
// empty and complete. When no segment files exist, a fresh segment #1 is
// created.
func (q *DQue) load() error {
	// Find all queue files
	// NOTE(review): ioutil.ReadDir is deprecated since Go 1.16 in favor of
	// os.ReadDir — consider migrating when the module's minimum Go version allows.
	files, err := ioutil.ReadDir(q.fullPath)
	if err != nil {
		return errors.Wrap(err, "unable to read files in "+q.fullPath)
	}
	// Find the smallest and the largest file numbers
	minNum := math.MaxInt32
	maxNum := 0
	for _, f := range files {
		if !f.IsDir() && filePattern.MatchString(f.Name()) {
			// Extract number out of the filename
			fileNumStr := filePattern.FindStringSubmatch(f.Name())[1]
			fileNum, _ := strconv.Atoi(fileNumStr)
			if fileNum > maxNum {
				maxNum = fileNum
			}
			if fileNum < minNum {
				minNum = fileNum
			}
		}
	}
	// If files were found, set q.firstSegment and q.lastSegment
	if maxNum > 0 {
		// We found files
		for {
			seg, err := openQueueSegment(q.fullPath, minNum, q.turbo, q.builder)
			if err != nil {
				return errors.Wrap(err, "unable to create queue segment in "+q.fullPath)
			}
			// Make sure the first segment is not empty or it's not complete (i.e. is current)
			if seg.size() > 0 || seg.sizeOnDisk() < q.config.ItemsPerSegment {
				q.firstSegment = seg
				break
			}
			// Delete the segment as it's empty and complete
			seg.delete()
			// Try the next one
			minNum++
		}
		if minNum == maxNum {
			// We have only one segment so the
			// first and last are the same instance (in this case)
			q.lastSegment = q.firstSegment
		} else {
			// We have multiple segments
			seg, err := openQueueSegment(q.fullPath, maxNum, q.turbo, q.builder)
			if err != nil {
				return errors.Wrap(err, "unable to create segment for "+q.fullPath)
			}
			q.lastSegment = seg
		}
	} else {
		// We found no files so build a new queue starting with segment 1
		seg, err := newQueueSegment(q.fullPath, 1, q.turbo, q.builder)
		if err != nil {
			return errors.Wrap(err, "unable to create queue segment in "+q.fullPath)
		}
		// The first and last are the same instance (in this case)
		q.firstSegment = seg
		q.lastSegment = seg
	}
	return nil
} | load populates the queue from disk | load | go | joncrlsn/dque | queue.go | https://github.com/joncrlsn/dque/blob/master/queue.go | MIT |
// Error implements the error interface, describing the corrupted segment file.
func (e ErrCorruptedSegment) Error() string {
	const format = "segment file %s is corrupted: %s"
	return fmt.Sprintf(format, e.Path, e.Err)
} | Error returns a string describing ErrCorruptedSegment | Error | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// Unwrap returns the wrapped error, enabling errors.Is/errors.As matching.
func (e ErrCorruptedSegment) Unwrap() error {
	return e.Err
} | Unwrap returns the wrapped error | Unwrap | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// Error implements the error interface, describing the undecodable object.
func (e ErrUnableToDecode) Error() string {
	const format = "object in segment file %s cannot be decoded: %s"
	return fmt.Sprintf(format, e.Path, e.Err)
} | Error returns a string describing ErrUnableToDecode error | Error | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// Unwrap returns the wrapped error, enabling errors.Is/errors.As matching.
func (e ErrUnableToDecode) Unwrap() error {
	return e.Err
} | Unwrap returns the wrapped error | Unwrap | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// load replays the segment file into memory. The file is a sequence of
// records, each a 4-byte little-endian length followed by that many bytes of
// gob data; a length of zero is a removal tombstone that drops the oldest
// in-memory object. Returns ErrCorruptedSegment or ErrUnableToDecode for
// errors pertaining to file contents.
func (seg *qSegment) load() error {
	// This is heavy-handed but its safe
	seg.mutex.Lock()
	defer seg.mutex.Unlock()
	// Open the file in read mode
	f, err := os.OpenFile(seg.filePath(), os.O_RDONLY, 0644)
	if err != nil {
		return errors.Wrap(err, "error opening file: "+seg.filePath())
	}
	defer f.Close()
	// NOTE(review): seg.file is assigned the read-only handle that the defer
	// above closes when load returns — presumably the caller (open/new segment
	// constructors) reopens the file for appending afterwards; confirm, since
	// add/remove write through seg.file.
	seg.file = f
	// Loop until we can load no more
	for {
		// Read the 4 byte length of the gob
		lenBytes := make([]byte, 4)
		if n, err := io.ReadFull(seg.file, lenBytes); err != nil {
			if err == io.EOF {
				// Clean end of file: everything replayed successfully.
				return nil
			}
			return ErrCorruptedSegment{
				Path: seg.filePath(),
				Err:  errors.Wrapf(err, "error reading object length (read %d/4 bytes)", n),
			}
		}
		// Convert the bytes into a 32-bit unsigned int
		gobLen := binary.LittleEndian.Uint32(lenBytes)
		if gobLen == 0 {
			// Remove the first item from the in-memory queue
			if len(seg.objects) == 0 {
				// More tombstones than objects means the file is inconsistent.
				return ErrCorruptedSegment{
					Path: seg.filePath(),
					Err:  fmt.Errorf("excess deletion records (%d)", seg.removeCount+1),
				}
			}
			seg.objects = seg.objects[1:]
			// log.Println("TEMP: Detected delete in load()")
			seg.removeCount++
			continue
		}
		data := make([]byte, int(gobLen))
		if _, err := io.ReadFull(seg.file, data); err != nil {
			return ErrCorruptedSegment{
				Path: seg.filePath(),
				Err:  errors.Wrap(err, "error reading gob data from file"),
			}
		}
		// Decode the bytes into an object
		object := seg.objectBuilder()
		if err := gob.NewDecoder(bytes.NewReader(data)).Decode(object); err != nil {
			return ErrUnableToDecode{
				Path: seg.filePath(),
				Err:  errors.Wrapf(err, "failed to decode %T", object),
			}
		}
		// Add item to the objects slice
		seg.objects = append(seg.objects, object)
		// log.Printf("TEMP: Loaded: %#v\n", object)
	}
} | load reads all objects from the queue file into a slice
returns ErrCorruptedSegment or ErrUnableToDecode for errors pertaining to file contents. | load | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// peek returns the first item in the segment without removing it.
// Returns errEmptySegment when the segment holds no live objects.
func (seg *qSegment) peek() (interface{}, error) {
	seg.mutex.Lock()
	defer seg.mutex.Unlock()
	if len(seg.objects) > 0 {
		return seg.objects[0], nil
	}
	return nil, errEmptySegment
} | peek returns the first item in the segment without removing it.
If the queue is already empty, the emptySegment error will be returned. | peek | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// remove removes and returns the first item in the segment, appending a
// zero-length record (a removal tombstone) to the segment file so the delete
// survives a restart. Returns errEmptySegment when the segment is empty.
func (seg *qSegment) remove() (interface{}, error) {
	// This is heavy-handed but its safe
	seg.mutex.Lock()
	defer seg.mutex.Unlock()
	if len(seg.objects) == 0 {
		// Queue is empty so return nil object (and empty_segment error)
		return nil, errEmptySegment
	}
	// Create a 4-byte length of value zero (this signifies a removal)
	deleteLen := 0
	deleteLenBytes := make([]byte, 4)
	binary.LittleEndian.PutUint32(deleteLenBytes, uint32(deleteLen))
	// Write the 4-byte length (of zero) first
	// (tombstone is written before mutating memory so a crash cannot lose the delete)
	if _, err := seg.file.Write(deleteLenBytes); err != nil {
		return nil, errors.Wrapf(err, "failed to remove item from segment %d", seg.number)
	}
	// Save a reference to the first item in the in-memory queue
	object := seg.objects[0]
	// Remove the first item from the in-memory queue
	seg.objects = seg.objects[1:]
	// Increment the delete count
	seg.removeCount++
	// Possibly force writes to disk
	if err := seg._sync(); err != nil {
		return nil, err
	}
	return object, nil
} | remove removes and returns the first item in the segment and adds
a zero length marker to the end of the queue file to signify a removal.
If the queue is already empty, the emptySegment error will be returned. | remove | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// add appends object to the in-memory segment and persists it to the segment
// file as a length-prefixed gob record (4-byte little-endian length followed
// by the encoded bytes), syncing to disk unless turbo is on.
func (seg *qSegment) add(object interface{}) error {
	// This is heavy-handed but its safe
	seg.mutex.Lock()
	defer seg.mutex.Unlock()
	// Encode the struct to a byte buffer
	var buff bytes.Buffer
	enc := gob.NewEncoder(&buff)
	if err := enc.Encode(object); err != nil {
		return errors.Wrap(err, "error gob encoding object")
	}
	// Count the bytes stored in the byte buffer
	// and store the count into a 4-byte byte array
	buffLen := len(buff.Bytes())
	buffLenBytes := make([]byte, 4)
	binary.LittleEndian.PutUint32(buffLenBytes, uint32(buffLen))
	// Write the 4-byte buffer length first
	if _, err := seg.file.Write(buffLenBytes); err != nil {
		return errors.Wrapf(err, "failed to write object length to segment %d", seg.number)
	}
	// Then write the buffer bytes
	if _, err := seg.file.Write(buff.Bytes()); err != nil {
		return errors.Wrapf(err, "failed to write object to segment %d", seg.number)
	}
	seg.objects = append(seg.objects, object)
	// Possibly force writes to disk
	return seg._sync()
} | Add adds an item to the in-memory queue segment and appends it to the persistent file | add | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// size returns the number of objects currently held in memory for this
// segment; previously removed items are not counted.
func (seg *qSegment) size() int {
	// This is heavy-handed but its safe
	seg.mutex.Lock()
	defer seg.mutex.Unlock()
	return len(seg.objects)
} | size returns the number of objects in this segment.
The size does not include items that have been removed. | size | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// sizeOnDisk returns the in-memory object count plus the number of removals
// recorded so far, which mirrors the number of records written to the file.
func (seg *qSegment) sizeOnDisk() int {
	// This is heavy-handed but its safe
	seg.mutex.Lock()
	defer seg.mutex.Unlock()
	return len(seg.objects) + seg.removeCount
} | sizeOnDisk returns the number of objects in memory plus removed objects. This
number will match the number of objects still on disk.
This number is used to keep the file from growing forever when items are
removed about as fast as they are added. | sizeOnDisk | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// delete closes the segment file, removes it from disk, and clears the
// in-memory slice, leaving seg.file nil so the segment is no longer usable.
func (seg *qSegment) delete() error {
	// This is heavy-handed but its safe
	seg.mutex.Lock()
	defer seg.mutex.Unlock()
	if err := seg.file.Close(); err != nil {
		return errors.Wrap(err, "unable to close the segment file before deleting")
	}
	// Delete the storage for this queue
	err := os.Remove(seg.filePath())
	if err != nil {
		return errors.Wrap(err, "error deleting file: "+seg.filePath())
	}
	// Empty the in-memory slice of objects (keeps the backing array)
	seg.objects = seg.objects[:0]
	seg.file = nil
	return nil
} | delete wipes out the queue and its persistent state | delete | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT
// turboOn enables turbo mode, which skips the per-write fsync in _sync.
// NOTE(review): sets the flag without holding seg.mutex — presumably callers
// serialize turbo toggling; confirm.
func (seg *qSegment) turboOn() {
	seg.turbo = true
} | turboOn allows the filesystem to decide when to sync file changes to disk
Speed is greatly increased by turning turbo on; however, there is some
risk of losing data should a power-loss occur. | turboOn | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// turboOff flushes any pending (maybe-dirty) data via turboSync and then
// disables turbo mode so subsequent writes fsync immediately.
func (seg *qSegment) turboOff() error {
	if !seg.turbo {
		// turboOff is known to be called twice when the first and last segments
		// are the same, so a second call is a harmless no-op.
		return nil
	}
	if err := seg.turboSync(); err != nil {
		return err
	}
	seg.turbo = false
	return nil
} | turboOff re-enables the "safety" mode that syncs every file change to disk as
they happen. | turboOff | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// turboSync fsyncs the segment file, but only when turbo mode is on and
// there are unsynced writes (maybeDirty). No-op otherwise.
func (seg *qSegment) turboSync() error {
	if !seg.turbo {
		// When the first and last segments are the same, this method
		// will be called twice.
		return nil
	}
	if seg.maybeDirty {
		if err := seg.file.Sync(); err != nil {
			return errors.Wrap(err, "unable to sync file changes.")
		}
		seg.syncCount++
		seg.maybeDirty = false
	}
	return nil
} | turboSync does an fsync to disk if turbo is on. | turboSync | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT
// _sync either marks the segment maybe-dirty (turbo on) or fsyncs the file
// immediately (turbo off). Intended to be called only from add/remove, which
// already hold seg.mutex.
func (seg *qSegment) _sync() error {
	if seg.turbo {
		// We do *not* force a sync if turbo is on
		// We just mark it maybe dirty so a later turboSync/turboOff flushes it
		seg.maybeDirty = true
		return nil
	}
	if err := seg.file.Sync(); err != nil {
		return errors.Wrap(err, "unable to sync file changes in _sync method.")
	}
	seg.syncCount++
	seg.maybeDirty = false
	return nil
} | _sync must only be called by the add and remove methods on qSegment.
Only syncs if turbo is off | _sync | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// close closes the underlying segment file, wrapping any error with the
// segment's file name for context. It does not touch the in-memory objects.
func (seg *qSegment) close() error {
	if err := seg.file.Close(); err != nil {
		return errors.Wrapf(err, "unable to close segment file %s.", seg.fileName())
	}
	return nil
} | close is used when this is the last segment, but is now full, so we are
creating a new last segment.
This should only be called if this segment is not also the first segment. | close | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT |
// newQueueSegment creates a brand-new segment file (append-only, 0644) in
// dirPath and returns the segment with the file left open for writes.
// Fails if dirPath is not a directory or the segment file already exists.
func newQueueSegment(dirPath string, number int, turbo bool, builder func() interface{}) (*qSegment, error) {
	seg := qSegment{dirPath: dirPath, number: number, turbo: turbo, objectBuilder: builder}
	if !dirExists(seg.dirPath) {
		return nil, errors.New("dirPath is not a valid directory: " + seg.dirPath)
	}
	if fileExists(seg.filePath()) {
		return nil, errors.New("file already exists: " + seg.filePath())
	}
	// Create the file in append mode
	var err error
	seg.file, err = os.OpenFile(seg.filePath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, errors.Wrapf(err, "error creating file: %s.", seg.filePath())
	}
	// Leave the file open for future writes
	return &seg, nil
} | newQueueSegment creates a new, persistent segment of the queue | newQueueSegment | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT
// openQueueSegment loads an existing segment file into memory (via seg.load)
// and re-opens the file in append-only mode for further writes.
// Fails if dirPath is not a directory or the segment file does not exist.
func openQueueSegment(dirPath string, number int, turbo bool, builder func() interface{}) (*qSegment, error) {
	seg := qSegment{dirPath: dirPath, number: number, turbo: turbo, objectBuilder: builder}
	if !dirExists(seg.dirPath) {
		return nil, errors.New("dirPath is not a valid directory: " + seg.dirPath)
	}
	if !fileExists(seg.filePath()) {
		return nil, errors.New("file does not exist: " + seg.filePath())
	}
	// Load the items into memory
	if err := seg.load(); err != nil {
		return nil, errors.Wrap(err, "unable to load queue segment in "+dirPath)
	}
	// Re-open the file in append mode
	var err error
	seg.file, err = os.OpenFile(seg.filePath(), os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		return nil, errors.Wrap(err, "error opening file: "+seg.filePath())
	}
	// Leave the file open for future writes
	return &seg, nil
} | openQueueSegment reads an existing persistent segment of the queue into memory | openQueueSegment | go | joncrlsn/dque | segment.go | https://github.com/joncrlsn/dque/blob/master/segment.go | MIT
// dirExists reports whether path exists and is a directory.
// Any os.Stat error (including "not exist") yields false.
func dirExists(path string) bool {
	fileInfo, err := os.Stat(path)
	if err == nil {
		return fileInfo.IsDir()
	}
	return false
} | dirExists returns true or false | dirExists | go | joncrlsn/dque | util.go | https://github.com/joncrlsn/dque/blob/master/util.go | MIT
// fileExists reports whether path exists and is a regular (non-directory)
// entry. Any os.Stat error (including "not exist") yields false.
func fileExists(path string) bool {
	fileInfo, err := os.Stat(path)
	if err == nil {
		return !fileInfo.IsDir()
	}
	return false
} | fileExists returns true or false | fileExists | go | joncrlsn/dque | util.go | https://github.com/joncrlsn/dque/blob/master/util.go | MIT
// setupTLSConfig builds a *tls.Config from the given TLSConfig: it starts
// from the system cert pool (falling back to an empty pool with a warning),
// optionally appends a custom CA, optionally loads a client cert/key pair,
// and honors the Insecure flag by skipping server cert verification.
// Returns an error if TLS is not enabled, a file cannot be read, or the CA
// PEM cannot be parsed.
func setupTLSConfig(tlsConfig TLSConfig) (*tls.Config, error) {
	if !tlsConfig.Enabled {
		return nil, errors.Errorf("tls should be enabled at this point")
	}
	caPool, err := x509.SystemCertPool()
	if err != nil {
		output.Warnf("error reading system cert pool: %v", err)
		caPool = x509.NewCertPool()
	}
	if tlsConfig.CA != "" {
		caString, err := os.ReadFile(tlsConfig.CA)
		if err != nil {
			return nil, err
		}
		ok := caPool.AppendCertsFromPEM(caString)
		if !ok {
			return nil, errors.Errorf("unable to add ca at %s to certificate pool", tlsConfig.CA)
		}
	}
	var clientCert tls.Certificate
	if tlsConfig.Cert != "" && tlsConfig.CertKey != "" {
		clientCert, err = tls.LoadX509KeyPair(tlsConfig.Cert, tlsConfig.CertKey)
		if err != nil {
			return nil, err
		}
	}
	// NOTE(review): when no client cert is configured, the zero-value
	// tls.Certificate is still placed in Certificates — presumably harmless
	// for the TLS handshake here; confirm.
	bundle := &tls.Config{
		RootCAs:      caPool,
		Certificates: []tls.Certificate{clientCert},
	}
	if tlsConfig.Insecure {
		bundle.InsecureSkipVerify = true
	}
	return bundle, nil
} | setupTlsConfig takes the paths to a tls certificate, CA, and certificate key in
a PEM format and returns a constructed tls.Config object. | setupTLSConfig | go | deviceinsight/kafkactl | internal/common-operation.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/common-operation.go | Apache-2.0 |
// Execute runs the given binary with args in shell.Dir, inheriting the
// current environment, stdin, and stderr. Stdout is piped through
// filterOutput into output.IoStreams.Out. A non-zero exit status from the
// child is converted into an exitError via newExitError; any other failure
// is wrapped and returned.
func (shell *ShellRunner) Execute(binary string, args []string) error {
	cmd := exec.Command(binary, args...)
	cmd.Dir = shell.Dir
	cmd.Env = os.Environ()
	// get stdOut of cmd
	// NOTE(review): the StdoutPipe error is discarded — presumably it can only
	// fail after Start or with a non-nil Stdout; confirm.
	stdoutIn, _ := cmd.StdoutPipe()
	// stdin, stderr directly mapped from outside
	cmd.Stdin = os.Stdin
	cmd.Stderr = os.Stderr
	err := cmd.Start()
	if err != nil {
		return err
	}
	// Drain/filter stdout on a goroutine and wait for it to finish before
	// calling cmd.Wait (Wait closes the pipe).
	var wg sync.WaitGroup
	var writeErr error
	wg.Add(1)
	go func() {
		writeErr = filterOutput(output.IoStreams.Out, stdoutIn)
		wg.Done()
	}()
	wg.Wait()
	if writeErr != nil {
		return fmt.Errorf("unable to write std out: %w", writeErr)
	}
	err = cmd.Wait()
	if err != nil {
		var exitError *exec.ExitError
		switch {
		case errors.As(err, &exitError):
			// Propagate any non-zero exit status from the external command
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			exitStatus := waitStatus.ExitStatus()
			err = newExitError(cmd.Path, cmd.Args, exitStatus, exitError, "", "")
		default:
			return fmt.Errorf("unexpected error: %w", err)
		}
	}
	return err
} | Execute a shell command | Execute | go | deviceinsight/kafkactl | internal/k8s/runner.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/k8s/runner.go | Apache-2.0
// Indent prefixes each non-empty line of text with the indent string,
// dropping empty lines.
// NOTE(review): newlines are only re-inserted before lines with index
// 0 < i < last, so for text WITHOUT a trailing newline the final two lines
// are joined together — presumably callers always pass newline-terminated
// text; confirm.
func Indent(text, indent string) string {
	var b strings.Builder
	// Pre-size generously: indented output is at most ~2x the input here.
	b.Grow(len(text) * 2)
	lines := strings.Split(text, "\n")
	last := len(lines) - 1
	for i, j := range lines {
		if i > 0 && i < last && j != "" {
			b.WriteString("\n")
		}
		if j != "" {
			b.WriteString(indent + j)
		}
	}
	return b.String()
} | indents a block of text with an indent string | Indent | go | deviceinsight/kafkactl | internal/k8s/exit_error.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/k8s/exit_error.go | Apache-2.0
// FormatBaseURL normalizes a schema-registry base URL: strips a trailing
// slash, injects "http://" when no scheme is present (upgraded to "https"
// if the port is 443), and appends an explicit default port (443 for https,
// 80 otherwise) when none is given. An empty input returns "".
// NOTE(review): panics on an unparsable URL rather than returning an error.
func FormatBaseURL(baseURL string) string {
	if baseURL == "" {
		return ""
	}
	// remove last slash, so the API can append the path with ease.
	if baseURL[len(baseURL)-1] == '/' {
		baseURL = baseURL[0 : len(baseURL)-1]
	}
	schemaIdx := strings.Index(baseURL, "://")
	injectSchema := schemaIdx < 0
	if injectSchema {
		baseURL = fmt.Sprintf("http://%s", baseURL)
	}
	parsedURL, err := url.Parse(baseURL)
	if err != nil {
		panic("Schema registry url invalid")
	}
	// A scheme-less host on port 443 is assumed to be https.
	if injectSchema && parsedURL.Port() == "443" {
		parsedURL.Scheme = "https"
	}
	if parsedURL.Port() == "" {
		if parsedURL.Scheme == "https" {
			parsedURL.Host = fmt.Sprintf("%s:%s", parsedURL.Hostname(), "443")
		} else {
			parsedURL.Host = fmt.Sprintf("%s:%s", parsedURL.Hostname(), "80")
		}
	}
	return parsedURL.String()
} | FormatBaseURL will try to make sure that the schema:host:port:path pattern is followed on the `baseURL` field. | FormatBaseURL | go | deviceinsight/kafkactl | internal/helpers/schemaregistry/schema_registry.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/helpers/schemaregistry/schema_registry.go | Apache-2.0
// NewJVMCompatiblePartitioner returns a Sarama hash partitioner backed by
// MurmurHasher, so keys land on the same partitions as JVM Kafka clients.
func NewJVMCompatiblePartitioner(topic string) sarama.Partitioner {
	return sarama.NewCustomHashPartitioner(MurmurHasher)(topic)
} | NewJVMCompatiblePartitioner creates a Sarama partitioner that uses
the same hashing algorithm as JVM Kafka clients. | NewJVMCompatiblePartitioner | go | deviceinsight/kafkactl | internal/producer/JVMCompatiblePartitioner.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/producer/JVMCompatiblePartitioner.go | Apache-2.0 |
// MurmurHasher returns a new murmur2-based hash.Hash32 (see murmurHash).
// It exists to satisfy the factory signature expected by
// sarama.NewCustomHashPartitioner; streaming use is not supported.
func MurmurHasher() hash.Hash32 {
	return new(murmurHash)
} | MurmurHasher creates murmur2 hasher implementing hash.Hash32 interface.
The implementation is not full and does not support streaming.
It only implements the interface to comply with sarama.NewCustomHashPartitioner signature.
But Sarama only uses Write method once, when writing keys and values of the message,
so streaming support is not necessary. | MurmurHasher | go | deviceinsight/kafkactl | internal/producer/JVMCompatiblePartitioner.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/producer/JVMCompatiblePartitioner.go | Apache-2.0 |
// murmur2 computes the 32-bit murmur2 hash of data using the same constants
// (seed 0x9747b28c, m 0x5bd1e995, r 24) and mixing steps as Kafka's Java
// Utils.murmur2, so results match JVM client partitioning.
func murmur2(data []byte) int32 {
	length := int32(len(data))
	seed := uint32(0x9747b28c)
	m := int32(0x5bd1e995)
	r := uint32(24)
	h := int32(seed ^ uint32(length))
	// Mix 4 bytes at a time into the hash (little-endian word assembly).
	length4 := length / 4
	for i := int32(0); i < length4; i++ {
		i4 := i * 4
		k := int32(data[i4+0]&0xff) + (int32(data[i4+1]&0xff) << 8) + (int32(data[i4+2]&0xff) << 16) + (int32(data[i4+3]&0xff) << 24)
		k *= m
		k ^= int32(uint32(k) >> r) // unsigned shift, as in Java's >>>
		k *= m
		h *= m
		h ^= k
	}
	// Handle the last few bytes of the input array (tail shorter than 4).
	switch length % 4 {
	case 3:
		h ^= int32(data[(length & ^3)+2]&0xff) << 16
		fallthrough
	case 2:
		h ^= int32(data[(length & ^3)+1]&0xff) << 8
		fallthrough
	case 1:
		h ^= int32(data[length & ^3] & 0xff)
		h *= m
	}
	// Final avalanche mixing.
	h ^= int32(uint32(h) >> 13)
	h *= m
	h ^= int32(uint32(h) >> 15)
	return h
} | murmur2 implements hashing algorithm used by JVM clients for Kafka.
See the original implementation: https://github.com/apache/kafka/blob/1.0.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L353 | murmur2 | go | deviceinsight/kafkactl | internal/producer/JVMCompatiblePartitioner.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/producer/JVMCompatiblePartitioner.go | Apache-2.0 |
// toPositive clears the sign bit, mapping i to a non-negative value exactly
// like Kafka's Java Utils.toPositive (i & 0x7fffffff).
func toPositive(i int32) int32 {
	return i & 0x7fffffff
} | toPositive converts i to positive number as per the original implementation in the JVM clients for Kafka.
See the original implementation: https://github.com/apache/kafka/blob/1.0.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L741 | toPositive | go | deviceinsight/kafkactl | internal/producer/JVMCompatiblePartitioner.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/producer/JVMCompatiblePartitioner.go | Apache-2.0 |
// Init resets viper and loads the kafkactl configuration. The config file is
// resolved in priority order: project config from the working directory, then
// the --config-file flag, then the KAFKA_CTL_CONFIG env var. If no config can
// be read, a default one is generated and re-read. When a provided config
// file lacks "current-context" it is treated as read-only and a separate
// writable viper instance is loaded for state that must be persisted.
func (c *config) Init() error {
	viper.Reset()
	if c.flags.Verbose {
		output.IoStreams.EnableDebug()
	}
	configFile := resolveProjectConfigFileFromWorkingDir()
	switch {
	case c.flags.ConfigFile != "":
		configFile = &c.flags.ConfigFile
	case os.Getenv("KAFKA_CTL_CONFIG") != "":
		envConfig := os.Getenv("KAFKA_CTL_CONFIG")
		configFile = &envConfig
	}
	mapEnvVariables()
	if err := c.loadConfig(viper.GetViper(), configFile); err != nil {
		// Only "file not found"-style errors fall through to generation;
		// anything else is reported as-is.
		if isUnknownError(err) {
			return fmt.Errorf("error reading config file: %s (%v)", viper.ConfigFileUsed(), err.Error())
		}
		err = generateDefaultConfig()
		if err != nil {
			return fmt.Errorf("error generating default config file: %v", err.Error())
		}
		// We read generated config now
		if err = c.loadConfig(viper.GetViper(), configFile); err != nil {
			return fmt.Errorf("error reading config file: %s (%v)", viper.ConfigFileUsed(), err.Error())
		}
	}
	if configFile != nil && viper.GetString("current-context") == "" {
		// assuming the provided configFile is read-only
		c.writableConfig = viper.New()
		if err := c.loadConfig(c.writableConfig, nil); err != nil {
			if isUnknownError(err) {
				return fmt.Errorf("error reading config file: %s (%v)", c.writableConfig.ConfigFileUsed(), err.Error())
			}
			err = generateDefaultConfig()
			if err != nil {
				return fmt.Errorf("error generating default config file: %v", err.Error())
			}
			// We read generated config now
			if err = c.loadConfig(c.writableConfig, configFile); err != nil {
				return fmt.Errorf("error reading config file: %s (%v)", viper.ConfigFileUsed(), err.Error())
			}
		}
	} else {
		// Normal case: the global viper instance is also the writable one.
		c.writableConfig = viper.GetViper()
	}
	return nil
} | Init reads in config file and ENV variables if set. | Init | go | deviceinsight/kafkactl | internal/global/config.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/global/config.go | Apache-2.0
// generateDefaultConfig writes a minimal default config (a single "default"
// context pointing at localhost:9092) to the first config path, or to
// KAFKA_CTL_CONFIG if set, or to the $APPDATA-based path on Windows.
// Parent directories are created with mode 0700.
func generateDefaultConfig() error {
	cfgFile := filepath.Join(os.ExpandEnv(configPaths[0]), "config.yml")
	if os.Getenv("KAFKA_CTL_CONFIG") != "" {
		// use config file provided via env
		cfgFile = os.Getenv("KAFKA_CTL_CONFIG")
	} else if runtime.GOOS == "windows" {
		// use different configFile when running on windows
		for _, configPath := range configPaths {
			if strings.Contains(configPath, "$APPDATA") {
				cfgFile = filepath.Join(os.ExpandEnv(configPath), "config.yml")
				break
			}
		}
	}
	if err := os.MkdirAll(filepath.Dir(cfgFile), os.FileMode(0700)); err != nil {
		return err
	}
	viper.SetDefault("contexts.default.brokers", []string{"localhost:9092"})
	viper.SetDefault("current-context", "default")
	if err := viper.WriteConfigAs(cfgFile); err != nil {
		return err
	}
	output.Debugf("generated default config at %s", cfgFile)
	return nil
} | generateDefaultConfig generates default config in case there is no config | generateDefaultConfig | go | deviceinsight/kafkactl | internal/global/config.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/global/config.go | Apache-2.0
// replicationFactor returns the minimum replica count across the topic's
// partitions (0 if the topic has no partitions).
func replicationFactor(t Topic) int {
	var factor int
	for _, partition := range t.Partitions {
		// factor == 0 means "not yet set"; otherwise keep the smallest count.
		if len(partition.Replicas) < factor || factor == 0 {
			factor = len(partition.Replicas)
		}
	}
	return factor
} | replicationFactor for topic calculated as minimal replication factor across partitions. | replicationFactor | go | deviceinsight/kafkactl | internal/topic/topic-operation.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/topic/topic-operation.go | Apache-2.0
// ConvertToEpocUnixMillis parses the timestamp string (via util.ParseTimestamp)
// and returns it as epoch milliseconds. An empty string means the flag was not
// provided and yields (-1, nil); a parse failure yields (-1, err).
func ConvertToEpocUnixMillis(timestamp string) (int64, error) {
	if timestamp == "" {
		return -1, nil
	}
	aTime, err := util.ParseTimestamp(timestamp)
	if err != nil {
		return -1, err
	}
	return aTime.UnixMilli(), nil
} | Converts string to epoch unix timestamp
The string might be empty; in that case, the flag is considered absent and the value -1 is returned | ConvertToEpocUnixMillis | go | deviceinsight/kafkactl | internal/consume/PartitionConsumer.go | https://github.com/deviceinsight/kafkactl/blob/master/internal/consume/PartitionConsumer.go | Apache-2.0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.