file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
admission_test.go | ("", "")
if err != nil {
t.Fatalf("Unexpected error while creating temporary file: %v", err)
}
p := tempfile.Name()
defer os.Remove(p)
kubeconfig := `
clusters:
- name: foo
cluster:
server: https://example.com
users:
- name: alice
user:
token: deadbeef
contexts:
- name: default
context:
cluster: foo
user: alice
current-context: default
`
if _, err := tempfile.WriteString(kubeconfig); err != nil {
t.Fatalf("Unexpected error while writing test kubeconfig file: %v", err)
}
tests := []struct {
note string
input string
wantErr bool
}{
{"no config", "", true},
{"bad json", `{"foo": `, true},
{"bad yaml", `{foo" `, true},
{
"missing kubeconfig",
`{"foo": {}}`,
true,
},
{
"kubeconfig not found",
`{
"kubeconfig": "/kube-federation-scheduling-policy-file-not-found-test"
}`,
true,
},
{
"bad retry backoff",
fmt.Sprintf(`
{
"kubeconfig": %q,
"retryBackoff": -1
}
`, p),
true,
},
{
"a valid config",
fmt.Sprintf(`
{
"kubeconfig": %q
}
`, p),
false,
},
{
"a valid config with retry backoff",
fmt.Sprintf(`
{
"kubeconfig": %q,
"retryBackoff": 200
}
`, p),
false,
},
}
for _, tc := range tests {
var file io.Reader
if tc.input == "" {
file = nil
} else {
file = bytes.NewBufferString(tc.input)
}
_, err := newAdmissionController(file)
if tc.wantErr && err == nil {
t.Errorf("%v: Expected error", tc.note)
} else if !tc.wantErr && err != nil {
t.Errorf("%v: Unexpected error: %v", tc.note, err)
}
}
}
func TestAdmitQueryPayload(t *testing.T) {
var body interface{}
serve := func(w http.ResponseWriter, r *http.Request) {
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
t.Fatalf("Unexpected error reading admission payload: %v", err)
}
// No errors or annotations.
w.Write([]byte(`{}`))
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
rs := makeReplicaSet()
rs.Spec.MinReadySeconds = 100
attrs := makeAdmissionRecord(rs)
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
obj := body.(map[string]interface{})
metadata := obj["metadata"].(map[string]interface{})
spec := obj["spec"].(map[string]interface{})
name := metadata["name"].(string)
minReadySeconds := spec["minReadySeconds"].(float64)
expectedName := "myapp"
if name != expectedName {
t.Fatalf("Expected replicaset.metadata.name to be %v but got: %v", expectedName, name)
}
expectedMinReadySeconds := float64(100)
if minReadySeconds != expectedMinReadySeconds {
t.Fatalf("Expected replicaset.spec.minReadySeconds to be %v but got: %v", expectedMinReadySeconds, minReadySeconds)
}
}
func TestAdmitFailInternal(t *testing.T) | if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
}
func TestAdmitPolicyDoesNotExist(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(404)
}, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Expected admission controller to fail open but got error: %v", err)
}
}
func TestAdmitFailClosed(t *testing.T) {
tests := []struct {
note string
statusCode int
body string
}{
{"server error", 500, ""},
{"unmarshal error", 200, "{"},
{"undefined result", 404, ``},
{"policy errors", 200, `{"errors": ["conflicting replica-set-preferences"]}`},
}
for _, tc := range tests {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tc.statusCode)
if len(tc.body) > 0 {
w.Write([]byte(tc.body))
}
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Errorf("%v: Unexpected error while creating test admission controller/server: %v", tc.note, err)
continue
}
obj := makeReplicaSet()
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err == nil {
t.Errorf("%v: Expected admission controller to fail closed", tc.note)
}
}
}
func TestAdmitRetries(t *testing.T) {
var numQueries int
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
numQueries++
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
err = controller.Admit(makeAdmissionRecord(makeReplicaSet()))
if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
if numQueries <= 1 {
t.Fatalf("Expected multiple queries/retries but got (numQueries): %v", numQueries)
}
}
func TestAdmitSuccessWithAnnotationMerge(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`
{
"annotations": {
"foo": "bar-2"
}
}
`))
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
obj := makeReplicaSet()
obj.Annotations = map[string]string{}
obj.Annotations["foo"] = "bar"
obj.Annotations["bar"] = "baz"
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
annotations := attrs.GetObject().(*extensionsv1.ReplicaSet).Annotations
expected := map[string]string{
"foo": "bar-2",
"bar": "baz",
}
if !reflect.DeepEqual(annotations, expected) {
t.Fatalf("Expected annotations to be %v but got: %v", expected, annotations)
}
}
func newControllerWithTestServer(f func(w http.ResponseWriter, r *http.Request), policiesExist bool) (*admissionController, error) {
server, err := newTestServer(f)
if err != nil {
return nil, err
}
kubeConfigFile, err := makeKubeConfigFile(server.URL, "/some/path/to/decision")
if err != nil {
return nil, err
}
defer os.Remove(kubeConfigFile)
configFile, err := makeAdmissionControlConfigFile(kubeConfigFile)
if err != nil {
return nil, err
}
defer os.Remove(configFile)
file, err := os.Open(configFile)
if err != nil {
return nil, err
}
controller, err := newAdmissionController(file)
if err != nil {
return nil, err
}
mockClient := &fake.Clientset{}
var items []api.ConfigMap
if policiesExist {
items = append(items, api.ConfigMap | {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}
controller, err := newControllerWithTestServer(serve, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
mockClient := &fake.Clientset{}
mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("unknown error")
})
controller.SetInternalKubeClientSet(mockClient)
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
| identifier_body |
admission_test.go | "a valid config",
fmt.Sprintf(`
{
"kubeconfig": %q
}
`, p),
false,
},
{
"a valid config with retry backoff",
fmt.Sprintf(`
{
"kubeconfig": %q,
"retryBackoff": 200
}
`, p),
false,
},
}
for _, tc := range tests {
var file io.Reader
if tc.input == "" {
file = nil
} else {
file = bytes.NewBufferString(tc.input)
}
_, err := newAdmissionController(file)
if tc.wantErr && err == nil {
t.Errorf("%v: Expected error", tc.note)
} else if !tc.wantErr && err != nil {
t.Errorf("%v: Unexpected error: %v", tc.note, err)
}
}
}
func TestAdmitQueryPayload(t *testing.T) {
var body interface{}
serve := func(w http.ResponseWriter, r *http.Request) {
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
t.Fatalf("Unexpected error reading admission payload: %v", err)
}
// No errors or annotations.
w.Write([]byte(`{}`))
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
rs := makeReplicaSet()
rs.Spec.MinReadySeconds = 100
attrs := makeAdmissionRecord(rs)
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
obj := body.(map[string]interface{})
metadata := obj["metadata"].(map[string]interface{})
spec := obj["spec"].(map[string]interface{})
name := metadata["name"].(string)
minReadySeconds := spec["minReadySeconds"].(float64)
expectedName := "myapp"
if name != expectedName {
t.Fatalf("Expected replicaset.metadata.name to be %v but got: %v", expectedName, name)
}
expectedMinReadySeconds := float64(100)
if minReadySeconds != expectedMinReadySeconds {
t.Fatalf("Expected replicaset.spec.minReadySeconds to be %v but got: %v", expectedMinReadySeconds, minReadySeconds)
}
}
func TestAdmitFailInternal(t *testing.T) {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}
controller, err := newControllerWithTestServer(serve, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
mockClient := &fake.Clientset{}
mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("unknown error")
})
controller.SetInternalKubeClientSet(mockClient)
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
}
func TestAdmitPolicyDoesNotExist(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(404)
}, false)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
attrs := makeAdmissionRecord(makeReplicaSet())
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Expected admission controller to fail open but got error: %v", err)
}
}
func TestAdmitFailClosed(t *testing.T) {
tests := []struct {
note string
statusCode int
body string
}{
{"server error", 500, ""},
{"unmarshal error", 200, "{"},
{"undefined result", 404, ``},
{"policy errors", 200, `{"errors": ["conflicting replica-set-preferences"]}`},
}
for _, tc := range tests {
serve := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(tc.statusCode)
if len(tc.body) > 0 {
w.Write([]byte(tc.body))
}
}
controller, err := newControllerWithTestServer(serve, true)
if err != nil {
t.Errorf("%v: Unexpected error while creating test admission controller/server: %v", tc.note, err)
continue
}
obj := makeReplicaSet()
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err == nil {
t.Errorf("%v: Expected admission controller to fail closed", tc.note)
}
}
}
func TestAdmitRetries(t *testing.T) {
var numQueries int
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
numQueries++
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
err = controller.Admit(makeAdmissionRecord(makeReplicaSet()))
if err == nil {
t.Fatalf("Expected admission controller to fail closed")
}
if numQueries <= 1 {
t.Fatalf("Expected multiple queries/retries but got (numQueries): %v", numQueries)
}
}
func TestAdmitSuccessWithAnnotationMerge(t *testing.T) {
controller, err := newControllerWithTestServer(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`
{
"annotations": {
"foo": "bar-2"
}
}
`))
}, true)
if err != nil {
t.Fatalf("Unexpected error while creating test admission controller/server: %v", err)
}
obj := makeReplicaSet()
obj.Annotations = map[string]string{}
obj.Annotations["foo"] = "bar"
obj.Annotations["bar"] = "baz"
attrs := admission.NewAttributesRecord(obj, nil, obj.GroupVersionKind(), obj.Namespace, obj.Name, api.Resource("replicasets").WithVersion("version"), "", admission.Create, &user.DefaultInfo{})
err = controller.Admit(attrs)
if err != nil {
t.Fatalf("Unexpected error from admission controller: %v", err)
}
annotations := attrs.GetObject().(*extensionsv1.ReplicaSet).Annotations
expected := map[string]string{
"foo": "bar-2",
"bar": "baz",
}
if !reflect.DeepEqual(annotations, expected) {
t.Fatalf("Expected annotations to be %v but got: %v", expected, annotations)
}
}
func newControllerWithTestServer(f func(w http.ResponseWriter, r *http.Request), policiesExist bool) (*admissionController, error) {
server, err := newTestServer(f)
if err != nil {
return nil, err
}
kubeConfigFile, err := makeKubeConfigFile(server.URL, "/some/path/to/decision")
if err != nil {
return nil, err
}
defer os.Remove(kubeConfigFile)
configFile, err := makeAdmissionControlConfigFile(kubeConfigFile)
if err != nil {
return nil, err
}
defer os.Remove(configFile)
file, err := os.Open(configFile)
if err != nil {
return nil, err
}
controller, err := newAdmissionController(file)
if err != nil {
return nil, err
}
mockClient := &fake.Clientset{}
var items []api.ConfigMap
if policiesExist {
items = append(items, api.ConfigMap{})
}
mockClient.AddReactor("list", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
if action.GetNamespace() == policyConfigMapNamespace {
return true, &api.ConfigMapList{Items: items}, nil
}
return true, nil, nil
})
controller.SetInternalKubeClientSet(mockClient)
return controller, nil
}
func newTestServer(f func(w http.ResponseWriter, r *http.Request)) (*httptest.Server, error) {
server := httptest.NewUnstartedServer(http.HandlerFunc(f))
server.Start()
return server, nil
}
func makeAdmissionControlConfigFile(kubeConfigFile string) (string, error) {
tempfile, err := ioutil.TempFile("", "")
if err != nil {
return "", err
}
p := tempfile.Name()
configFileTmpl := `
kubeconfig: {{ .KubeConfigFile }}
retryBackoff: {{ .RetryBackoff }}
`
type configFileTemplateInput struct {
KubeConfigFile string
RetryBackoff int
}
input := configFileTemplateInput{
KubeConfigFile: kubeConfigFile,
RetryBackoff: 1,
}
tmpl, err := template.New("scheduling-policy-config").Parse(configFileTmpl)
if err != nil {
return "", err
}
if err := tmpl.Execute(tempfile, input); err != nil {
return "", err
}
return p, nil
}
func | makeKubeConfigFile | identifier_name |
|
precompiled_objects.go | PrecompiledObjects returns stored at the cloud storage bucket precompiled objects for the target category
func (cd *CloudStorage) GetPrecompiledObjects(ctx context.Context, targetSdk pb.Sdk, targetCategory, bucketName string) (*SdkToCategories, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(ctx, Timeout)
defer cancel()
precompiledObjects := make(SdkToCategories, 0)
bucket := client.Bucket(bucketName)
dirs, err := cd.getPrecompiledObjectsDirs(ctx, targetSdk, bucket)
if err != nil {
return nil, err
}
metaFiles := make(map[string][]byte, 0)
for objectDir := range dirs {
infoPath := filepath.Join(objectDir, MetaInfoName) // helping file with information about this object
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
metaFiles[objectDir] = metaFile
rc.Close()
}
for objectDir, metaFile := range metaFiles {
precompiledObject := ObjectInfo{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
continue
}
folderName := strings.Split(objectDir, string(os.PathSeparator))[1]
precompiledObject.Type = pb.PrecompiledObjectType(pb.PrecompiledObjectType_value[folderName])
for _, objectCategory := range precompiledObject.Categories {
if targetCategory == "" || targetCategory == objectCategory { //take only requested categories
appendPrecompiledObject(precompiledObject, &precompiledObjects, objectDir, objectCategory)
}
}
}
return &precompiledObjects, nil
}
// GetDefaultPrecompiledObjects returns the default precompiled objects
func (cd *CloudStorage) GetDefaultPrecompiledObjects(ctx context.Context, bucketName string) (map[pb.Sdk]*pb.PrecompiledObject, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
bucket := client.Bucket(bucketName)
paths := make(map[pb.Sdk]string, 0)
for _, sdkName := range pb.Sdk_name {
sdk := pb.Sdk(pb.Sdk_value[sdkName])
if sdk == pb.Sdk_SDK_UNSPECIFIED {
continue
}
path, err := cd.getDefaultPrecompiledObjectsPath(ctx, bucket, sdk)
if err != nil {
return nil, err
}
paths[sdk] = path
}
defaultPrecompiledObjects := make(map[pb.Sdk]*pb.PrecompiledObject, 0)
for sdk, path := range paths {
infoPath := filepath.Join(path, MetaInfoName)
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
rc.Close()
precompiledObject := &pb.PrecompiledObject{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
return nil, err
}
precompiledObject.CloudPath = path
defaultPrecompiledObjects[sdk] = precompiledObject
}
return defaultPrecompiledObjects, nil
}
// getDefaultPrecompiledObjectsPath returns path for SDK to the default precompiled object
func (cd *CloudStorage) getDefaultPrecompiledObjectsPath(ctx context.Context, bucket *storage.BucketHandle, sdk pb.Sdk) (string, error) {
pathToFile := fmt.Sprintf("%s/%s", sdk.String(), defaultPrecompiledObjectInfo)
rc, err := bucket.Object(pathToFile).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", pathToFile, err.Error())
return "", err
}
data, err := io.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
return "", err
}
path := make(map[string]string, 0)
if err := json.Unmarshal(data, &path); err != nil {
return "", err
}
return path[sdk.String()], nil
}
// getPrecompiledObjectsDirs finds directories with precompiled objects
// Since there is no notion of directory at cloud storage, then
// to avoid duplicates of a base path (directory) need to store it in a set/map.
func (cd *CloudStorage) getPrecompiledObjectsDirs(ctx context.Context, targetSdk pb.Sdk, bucket *storage.BucketHandle) (map[string]bool, error) {
prefix := targetSdk.String()
if targetSdk == pb.Sdk_SDK_UNSPECIFIED {
prefix = ""
}
it := bucket.Objects(ctx, &storage.Query{
Prefix: prefix,
})
objectDirs := make(map[string]bool, 0)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
bucketAttrs, errWithAttrs := bucket.Attrs(ctx)
if errWithAttrs != nil {
return nil, fmt.Errorf("error during receiving bucket's attributes: %s", err)
}
return nil, fmt.Errorf("Bucket(%q).Objects: %v", bucketAttrs.Name, err)
}
path := attrs.Name
if isPathToPrecompiledObjectFile(path) {
objectDirs[filepath.Dir(path)] = true //save base path (directory) of a file
}
}
return objectDirs, nil
}
// appendPrecompiledObject add precompiled object to the common structure of precompiled objects
func appendPrecompiledObject(objectInfo ObjectInfo, sdkToCategories *SdkToCategories, pathToObject string, categoryName string) {
sdkName := getSdkName(pathToObject)
categoryToPrecompiledObjects, ok := (*sdkToCategories)[sdkName]
if !ok {
(*sdkToCategories)[sdkName] = make(CategoryToPrecompiledObjects, 0)
categoryToPrecompiledObjects = (*sdkToCategories)[sdkName]
}
objects, ok := categoryToPrecompiledObjects[categoryName]
if !ok {
categoryToPrecompiledObjects[categoryName] = make(PrecompiledObjects, 0)
objects = categoryToPrecompiledObjects[categoryName]
}
objectInfo.CloudPath = pathToObject
objectInfo.Name = filepath.Base(pathToObject)
categoryToPrecompiledObjects[categoryName] = append(objects, objectInfo)
}
// getFileFromBucket receives the file from the bucket by its name
func (cd *CloudStorage) getFileFromBucket(ctx context.Context, pathToObject string, extension, bucketName string) ([]byte, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(ctx, Timeout)
defer cancel()
bucket := client.Bucket(bucketName)
filePath := pathToObject
if extension != "" {
filePath = getFullFilePath(pathToObject, extension)
}
rc, err := bucket.Object(filePath).NewReader(ctx)
if err != nil {
return nil, fmt.Errorf("Object(%q).NewReader: %v", filePath, err)
}
defer rc.Close()
data, err := ioutil.ReadAll(rc)
if err != nil {
return nil, fmt.Errorf("ioutil.ReadAll: %v", err)
}
return data, nil
}
// getFileExtensionBySdk get extension of the file with code by the sdk name
func getFileExtensionBySdk(precompiledObjectPath string) (string, error) {
sdk := strings.Split(precompiledObjectPath, string(os.PathSeparator))[0]
var extension string
switch sdk {
case pb.Sdk_SDK_JAVA.String():
extension = javaExtension
case pb.Sdk_SDK_PYTHON.String():
extension = pyExtension
case pb.Sdk_SDK_GO.String():
extension = goExtension
case pb.Sdk_SDK_SCIO.String():
extension = scioExtension
default:
return "", fmt.Errorf("")
}
return extension, nil
}
// getFullFilePath get full path to the precompiled object file
func getFullFilePath(objectDir string, extension string) string {
precompiledObjectName := filepath.Base(objectDir) //the base of the object's directory matches the name of the file
fileName := strings.Join([]string{precompiledObjectName, extension}, ".")
filePath := filepath.Join(objectDir, fileName)
return filePath
}
// isPathToPrecompiledObjectFile is it a path where precompiled object is stored (i.e. SDK/ObjectType/ObjectName/ObjectCode.sdkExtension)
func isPathToPrecompiledObjectFile(path string) bool {
return strings.Count(path, string(os.PathSeparator)) == separatorsNumber && !isDir(path)
}
// isDir checks whether the path imitates directory
func | isDir | identifier_name |
|
precompiled_objects.go | protobuf:"varint,7,opt,name=multifile,proto3" json:"multifile,omitempty"`
ContextLine int32 `protobuf:"varint,7,opt,name=context_line,proto3" json:"context_line,omitempty"`
DefaultExample bool `protobuf:"varint,7,opt,name=default_example,json=defaultExample,proto3" json:"default_example,omitempty"`
}
type PrecompiledObjects []ObjectInfo
type CategoryToPrecompiledObjects map[string]PrecompiledObjects
type SdkToCategories map[string]CategoryToPrecompiledObjects
// CloudStorage represents working tools for getting compiled and
// run beam examples from Google Cloud Storage. It is required that
// the bucket where examples are stored would be public,
// and it has a specific structure of files, namely:
// SDK_JAVA/
// ----defaultPrecompiledObject.info
// ----PRECOMPILED_OBJECT_TYPE_EXAMPLE/
// --------MinimalWordCount/
// ----------- MinimalWordCount.java
// ----------- MinimalWordCount.output
// ----------- MinimalWordCount.log
// ----------- MinimalWordCount.graph
// ----------- meta.info
// --------JoinExamples/
// ----------- JoinExamples.java
// ----------- JoinExamples.output
// ----------- JoinExamples.log
// ----------- JoinExamples.graph
// ----------- meta.info
// ----PRECOMPILED_OBJECT_TYPE_KATA/
// --------...
// ----...
// SDK_GO/
// ----defaultPrecompiledObject.info
// ----PRECOMPILED_OBJECT_TYPE_EXAMPLE/
// --------MinimalWordCount/
// ----------- MinimalWordCount.go
// ----------- MinimalWordCount.output
// ----------- MinimalWordCount.log
// ----------- MinimalWordCount.graph
// ----------- meta.info
// --------PingPong/
// ----PRECOMPILED_OBJECT_TYPE_KATA/
// --------...
// ----...
//
// defaultPrecompiledObject.info is a file that contains path to the default example:
// {
// "SDK_JAVA": "SDK_JAVA/PRECOMPILED_OBJECT_TYPE_EXAMPLE/MinimalWordCount"
// }
//
// meta.info is a json file that has the following fields:
// {
// "name": "name of the example",
// "description": "Description of an example",
// "multifile": false
// "categories": ["Common", "IO"]
// "pipeline_options": "--key1 value1",
// "default_example": false,
// "context_line": 1,
// "link": "https://github.com/apache/beam/blob/master/path/to/example"
// }
//
type CloudStorage struct {
}
func New() *CloudStorage {
return &CloudStorage{}
}
// GetPrecompiledObject returns the precompiled example
func (cd *CloudStorage) GetPrecompiledObject(ctx context.Context, precompiledObjectPath, bucketName string) (*pb.PrecompiledObject, error) {
cloudPath := filepath.Join(precompiledObjectPath, MetaInfoName)
data, err := cd.getFileFromBucket(ctx, cloudPath, "", bucketName)
if err != nil {
return nil, err
}
precompiledObject := &pb.PrecompiledObject{}
err = json.Unmarshal(data, precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
return nil, err
}
precompiledObject.CloudPath = precompiledObjectPath
return precompiledObject, nil
}
// GetPrecompiledObjectCode returns the source code of the example
func (cd *CloudStorage) GetPrecompiledObjectCode(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) |
// GetPrecompiledObjectOutput returns the run output of the example
func (cd *CloudStorage) GetPrecompiledObjectOutput(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, OutputExtension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectLogs returns the logs of the example
func (cd *CloudStorage) GetPrecompiledObjectLogs(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, LogsExtension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectGraph returns the graph of the example
func (cd *CloudStorage) GetPrecompiledObjectGraph(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, GraphExtension, bucketName)
if err != nil {
return "", err
}
return string(data), nil
}
// GetPrecompiledObjects returns stored at the cloud storage bucket precompiled objects for the target category
func (cd *CloudStorage) GetPrecompiledObjects(ctx context.Context, targetSdk pb.Sdk, targetCategory, bucketName string) (*SdkToCategories, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(ctx, Timeout)
defer cancel()
precompiledObjects := make(SdkToCategories, 0)
bucket := client.Bucket(bucketName)
dirs, err := cd.getPrecompiledObjectsDirs(ctx, targetSdk, bucket)
if err != nil {
return nil, err
}
metaFiles := make(map[string][]byte, 0)
for objectDir := range dirs {
infoPath := filepath.Join(objectDir, MetaInfoName) // helping file with information about this object
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
metaFiles[objectDir] = metaFile
rc.Close()
}
for objectDir, metaFile := range metaFiles {
precompiledObject := ObjectInfo{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
continue
}
folderName := strings.Split(objectDir, string(os.PathSeparator))[1]
precompiledObject.Type = pb.PrecompiledObjectType(pb.PrecompiledObjectType_value[folderName])
for _, objectCategory := range precompiledObject.Categories {
if targetCategory == "" || targetCategory == objectCategory { //take only requested categories
appendPrecompiledObject(precompiledObject, &precompiledObjects, objectDir, objectCategory)
}
}
}
return &precompiledObjects, nil
}
// GetDefaultPrecompiledObjects returns the default precompiled objects
func (cd *CloudStorage) GetDefaultPrecompiledObjects(ctx context.Context, bucketName string) (map[pb.Sdk]*pb.PrecompiledObject, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
bucket := client.Bucket(bucketName)
paths := make(map[pb.Sdk]string, 0)
for _, sdkName := range pb.Sdk_name {
sdk := pb.Sdk(pb.Sdk_value[sdkName])
if sdk == pb.Sdk_SDK_UNSPECIFIED {
continue
}
path, err := cd.getDefaultPrecompiledObjectsPath(ctx, bucket, sdk)
if err != nil {
return nil, err
}
paths[sdk] = path
}
defaultPrecompiledObjects := make(map[pb.Sdk]*pb.PrecompiledObject, 0)
for sdk, path := range paths {
infoPath := filepath.Join(path, MetaInfoName)
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
rc.Close()
precompiledObject := &pb.PrecompiledObject{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
return nil, err
}
precompiledObject.CloudPath = path
defaultPrecompiledObjects[sdk] = precompiledObject
}
return defaultPrecompiledObjects, nil
}
// getDefaultPrecompiledObjectsPath returns path for SDK to the default precompiled object
func (cd *CloudStorage) getDefaultPrecompiledObjectsPath(ctx context.Context, bucket *storage.BucketHandle, sdk pb.Sdk) (string, error) {
pathToFile := fmt.Sprintf("%s/%s", sdk.String(), defaultPrecompiledObjectInfo)
rc, err := bucket.Object(pathToFile).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", pathToFile, err.Error())
return "", err
}
data, | {
extension, err := getFileExtensionBySdk(precompiledObjectPath)
if err != nil {
return "", err
}
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, extension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
} | identifier_body |
precompiled_objects.go | ----------- JoinExamples.output
// ----------- JoinExamples.log
// ----------- JoinExamples.graph
// ----------- meta.info
// ----PRECOMPILED_OBJECT_TYPE_KATA/
// --------...
// ----...
// SDK_GO/
// ----defaultPrecompiledObject.info
// ----PRECOMPILED_OBJECT_TYPE_EXAMPLE/
// --------MinimalWordCount/
// ----------- MinimalWordCount.go
// ----------- MinimalWordCount.output
// ----------- MinimalWordCount.log
// ----------- MinimalWordCount.graph
// ----------- meta.info
// --------PingPong/
// ----PRECOMPILED_OBJECT_TYPE_KATA/
// --------...
// ----...
//
// defaultPrecompiledObject.info is a file that contains path to the default example:
// {
// "SDK_JAVA": "SDK_JAVA/PRECOMPILED_OBJECT_TYPE_EXAMPLE/MinimalWordCount"
// }
//
// meta.info is a json file that has the following fields:
// {
// "name": "name of the example",
// "description": "Description of an example",
// "multifile": false
// "categories": ["Common", "IO"]
// "pipeline_options": "--key1 value1",
// "default_example": false,
// "context_line": 1,
// "link": "https://github.com/apache/beam/blob/master/path/to/example"
// }
//
type CloudStorage struct {
}
func New() *CloudStorage {
return &CloudStorage{}
}
// GetPrecompiledObject returns the precompiled example
func (cd *CloudStorage) GetPrecompiledObject(ctx context.Context, precompiledObjectPath, bucketName string) (*pb.PrecompiledObject, error) {
cloudPath := filepath.Join(precompiledObjectPath, MetaInfoName)
data, err := cd.getFileFromBucket(ctx, cloudPath, "", bucketName)
if err != nil {
return nil, err
}
precompiledObject := &pb.PrecompiledObject{}
err = json.Unmarshal(data, precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
return nil, err
}
precompiledObject.CloudPath = precompiledObjectPath
return precompiledObject, nil
}
// GetPrecompiledObjectCode returns the source code of the example
func (cd *CloudStorage) GetPrecompiledObjectCode(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
extension, err := getFileExtensionBySdk(precompiledObjectPath)
if err != nil {
return "", err
}
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, extension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectOutput returns the run output of the example
func (cd *CloudStorage) GetPrecompiledObjectOutput(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, OutputExtension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectLogs returns the logs of the example
func (cd *CloudStorage) GetPrecompiledObjectLogs(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, LogsExtension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectGraph returns the graph of the example
func (cd *CloudStorage) GetPrecompiledObjectGraph(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, GraphExtension, bucketName)
if err != nil {
return "", err
}
return string(data), nil
}
// GetPrecompiledObjects returns stored at the cloud storage bucket precompiled objects for the target category
func (cd *CloudStorage) GetPrecompiledObjects(ctx context.Context, targetSdk pb.Sdk, targetCategory, bucketName string) (*SdkToCategories, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(ctx, Timeout)
defer cancel()
precompiledObjects := make(SdkToCategories, 0)
bucket := client.Bucket(bucketName)
dirs, err := cd.getPrecompiledObjectsDirs(ctx, targetSdk, bucket)
if err != nil {
return nil, err
}
metaFiles := make(map[string][]byte, 0)
for objectDir := range dirs {
infoPath := filepath.Join(objectDir, MetaInfoName) // helping file with information about this object
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
metaFiles[objectDir] = metaFile
rc.Close()
}
for objectDir, metaFile := range metaFiles {
precompiledObject := ObjectInfo{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
continue
}
folderName := strings.Split(objectDir, string(os.PathSeparator))[1]
precompiledObject.Type = pb.PrecompiledObjectType(pb.PrecompiledObjectType_value[folderName])
for _, objectCategory := range precompiledObject.Categories {
if targetCategory == "" || targetCategory == objectCategory { //take only requested categories
appendPrecompiledObject(precompiledObject, &precompiledObjects, objectDir, objectCategory)
}
}
}
return &precompiledObjects, nil
}
// GetDefaultPrecompiledObjects returns the default precompiled objects
func (cd *CloudStorage) GetDefaultPrecompiledObjects(ctx context.Context, bucketName string) (map[pb.Sdk]*pb.PrecompiledObject, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
bucket := client.Bucket(bucketName)
paths := make(map[pb.Sdk]string, 0)
for _, sdkName := range pb.Sdk_name {
sdk := pb.Sdk(pb.Sdk_value[sdkName])
if sdk == pb.Sdk_SDK_UNSPECIFIED {
continue
}
path, err := cd.getDefaultPrecompiledObjectsPath(ctx, bucket, sdk)
if err != nil {
return nil, err
}
paths[sdk] = path
}
defaultPrecompiledObjects := make(map[pb.Sdk]*pb.PrecompiledObject, 0)
for sdk, path := range paths {
infoPath := filepath.Join(path, MetaInfoName)
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
rc.Close()
precompiledObject := &pb.PrecompiledObject{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
return nil, err
}
precompiledObject.CloudPath = path
defaultPrecompiledObjects[sdk] = precompiledObject
}
return defaultPrecompiledObjects, nil
}
// getDefaultPrecompiledObjectsPath returns path for SDK to the default precompiled object
func (cd *CloudStorage) getDefaultPrecompiledObjectsPath(ctx context.Context, bucket *storage.BucketHandle, sdk pb.Sdk) (string, error) {
pathToFile := fmt.Sprintf("%s/%s", sdk.String(), defaultPrecompiledObjectInfo)
rc, err := bucket.Object(pathToFile).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", pathToFile, err.Error())
return "", err
}
data, err := io.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
return "", err
}
path := make(map[string]string, 0)
if err := json.Unmarshal(data, &path); err != nil {
return "", err
}
return path[sdk.String()], nil
}
// getPrecompiledObjectsDirs finds directories with precompiled objects
// Since there is no notion of directory at cloud storage, then
// to avoid duplicates of a base path (directory) need to store it in a set/map.
func (cd *CloudStorage) getPrecompiledObjectsDirs(ctx context.Context, targetSdk pb.Sdk, bucket *storage.BucketHandle) (map[string]bool, error) {
prefix := targetSdk.String()
if targetSdk == pb.Sdk_SDK_UNSPECIFIED {
prefix = ""
}
it := bucket.Objects(ctx, &storage.Query{
Prefix: prefix,
})
objectDirs := make(map[string]bool, 0)
for {
attrs, err := it.Next()
if err == iterator.Done {
break | }
if err != nil { | random_line_split |
|
precompiled_objects.go | WordCount.go
// ----------- MinimalWordCount.output
// ----------- MinimalWordCount.log
// ----------- MinimalWordCount.graph
// ----------- meta.info
// --------PingPong/
// ----PRECOMPILED_OBJECT_TYPE_KATA/
// --------...
// ----...
//
// defaultPrecompiledObject.info is a file that contains path to the default example:
// {
// "SDK_JAVA": "SDK_JAVA/PRECOMPILED_OBJECT_TYPE_EXAMPLE/MinimalWordCount"
// }
//
// meta.info is a json file that has the following fields:
// {
// "name": "name of the example",
// "description": "Description of an example",
// "multifile": false
// "categories": ["Common", "IO"]
// "pipeline_options": "--key1 value1",
// "default_example": false,
// "context_line": 1,
// "link": "https://github.com/apache/beam/blob/master/path/to/example"
// }
//
type CloudStorage struct {
}
func New() *CloudStorage {
return &CloudStorage{}
}
// GetPrecompiledObject returns the precompiled example
func (cd *CloudStorage) GetPrecompiledObject(ctx context.Context, precompiledObjectPath, bucketName string) (*pb.PrecompiledObject, error) {
cloudPath := filepath.Join(precompiledObjectPath, MetaInfoName)
data, err := cd.getFileFromBucket(ctx, cloudPath, "", bucketName)
if err != nil {
return nil, err
}
precompiledObject := &pb.PrecompiledObject{}
err = json.Unmarshal(data, precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
return nil, err
}
precompiledObject.CloudPath = precompiledObjectPath
return precompiledObject, nil
}
// GetPrecompiledObjectCode returns the source code of the example
func (cd *CloudStorage) GetPrecompiledObjectCode(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
extension, err := getFileExtensionBySdk(precompiledObjectPath)
if err != nil {
return "", err
}
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, extension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectOutput returns the run output of the example
func (cd *CloudStorage) GetPrecompiledObjectOutput(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, OutputExtension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectLogs returns the logs of the example
func (cd *CloudStorage) GetPrecompiledObjectLogs(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, LogsExtension, bucketName)
if err != nil {
return "", err
}
result := string(data)
return result, nil
}
// GetPrecompiledObjectGraph returns the graph of the example
func (cd *CloudStorage) GetPrecompiledObjectGraph(ctx context.Context, precompiledObjectPath, bucketName string) (string, error) {
data, err := cd.getFileFromBucket(ctx, precompiledObjectPath, GraphExtension, bucketName)
if err != nil {
return "", err
}
return string(data), nil
}
// GetPrecompiledObjects returns stored at the cloud storage bucket precompiled objects for the target category
func (cd *CloudStorage) GetPrecompiledObjects(ctx context.Context, targetSdk pb.Sdk, targetCategory, bucketName string) (*SdkToCategories, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(ctx, Timeout)
defer cancel()
precompiledObjects := make(SdkToCategories, 0)
bucket := client.Bucket(bucketName)
dirs, err := cd.getPrecompiledObjectsDirs(ctx, targetSdk, bucket)
if err != nil {
return nil, err
}
metaFiles := make(map[string][]byte, 0)
for objectDir := range dirs {
infoPath := filepath.Join(objectDir, MetaInfoName) // helping file with information about this object
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
metaFiles[objectDir] = metaFile
rc.Close()
}
for objectDir, metaFile := range metaFiles {
precompiledObject := ObjectInfo{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
continue
}
folderName := strings.Split(objectDir, string(os.PathSeparator))[1]
precompiledObject.Type = pb.PrecompiledObjectType(pb.PrecompiledObjectType_value[folderName])
for _, objectCategory := range precompiledObject.Categories {
if targetCategory == "" || targetCategory == objectCategory { //take only requested categories
appendPrecompiledObject(precompiledObject, &precompiledObjects, objectDir, objectCategory)
}
}
}
return &precompiledObjects, nil
}
// GetDefaultPrecompiledObjects returns the default precompiled objects
func (cd *CloudStorage) GetDefaultPrecompiledObjects(ctx context.Context, bucketName string) (map[pb.Sdk]*pb.PrecompiledObject, error) {
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
return nil, fmt.Errorf("storage.NewClient: %v", err)
}
defer client.Close()
bucket := client.Bucket(bucketName)
paths := make(map[pb.Sdk]string, 0)
for _, sdkName := range pb.Sdk_name {
sdk := pb.Sdk(pb.Sdk_value[sdkName])
if sdk == pb.Sdk_SDK_UNSPECIFIED {
continue
}
path, err := cd.getDefaultPrecompiledObjectsPath(ctx, bucket, sdk)
if err != nil {
return nil, err
}
paths[sdk] = path
}
defaultPrecompiledObjects := make(map[pb.Sdk]*pb.PrecompiledObject, 0)
for sdk, path := range paths {
infoPath := filepath.Join(path, MetaInfoName)
rc, err := bucket.Object(infoPath).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", infoPath, err.Error())
continue
}
metaFile, err := ioutil.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
continue
}
rc.Close()
precompiledObject := &pb.PrecompiledObject{}
err = json.Unmarshal(metaFile, &precompiledObject)
if err != nil {
logger.Errorf("json.Unmarshal: %v", err.Error())
return nil, err
}
precompiledObject.CloudPath = path
defaultPrecompiledObjects[sdk] = precompiledObject
}
return defaultPrecompiledObjects, nil
}
// getDefaultPrecompiledObjectsPath returns path for SDK to the default precompiled object
func (cd *CloudStorage) getDefaultPrecompiledObjectsPath(ctx context.Context, bucket *storage.BucketHandle, sdk pb.Sdk) (string, error) {
pathToFile := fmt.Sprintf("%s/%s", sdk.String(), defaultPrecompiledObjectInfo)
rc, err := bucket.Object(pathToFile).NewReader(ctx)
if err != nil {
logger.Errorf("Object(%q).NewReader: %v", pathToFile, err.Error())
return "", err
}
data, err := io.ReadAll(rc)
if err != nil {
logger.Errorf("ioutil.ReadAll: %v", err.Error())
return "", err
}
path := make(map[string]string, 0)
if err := json.Unmarshal(data, &path); err != nil {
return "", err
}
return path[sdk.String()], nil
}
// getPrecompiledObjectsDirs finds directories with precompiled objects
// Since there is no notion of directory at cloud storage, then
// to avoid duplicates of a base path (directory) need to store it in a set/map.
func (cd *CloudStorage) getPrecompiledObjectsDirs(ctx context.Context, targetSdk pb.Sdk, bucket *storage.BucketHandle) (map[string]bool, error) {
prefix := targetSdk.String()
if targetSdk == pb.Sdk_SDK_UNSPECIFIED {
prefix = ""
}
it := bucket.Objects(ctx, &storage.Query{
Prefix: prefix,
})
objectDirs := make(map[string]bool, 0)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil | {
bucketAttrs, errWithAttrs := bucket.Attrs(ctx)
if errWithAttrs != nil {
return nil, fmt.Errorf("error during receiving bucket's attributes: %s", err)
}
return nil, fmt.Errorf("Bucket(%q).Objects: %v", bucketAttrs.Name, err)
} | conditional_block |
|
load.py | _indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
'''
这里考虑到了一个问题:
如果再次运行程序,就会产生不同的测试集,多次运行后,你就会得到整个数据集
解决方法:1)设置随机种子;2)保存训练集与测试集
但是这两种方法都是一种静态的方法,党员是数据发生变化的时候,这些方法将出现问题
所以最后采用对每个实例赋予识别码,判断实例是否应该放入测试集
新的测试集将包含新实例中的20%,但是不会有之前在训练集的实例
'''
import hashlib
'''
下面的这个方法是利用了哈希索引的思想
将行号放入哈希表中进行判断(重点是256*0.2这个理解),256的来源为2^8==256,因为这是取最后一个字节
'''
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
'''
本函数是用来通过某一列进行训练集与测试集的划分工作
:param data: 原始数据集
:param test_ratio: 测试集的大小
:param id_column: 划分依照的列的属性
'''
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash)) # 这个id_我还是没明白是怎么回事
return data.loc[~in_test_set], data.loc[in_test_set] # 细节使用~
housing_with_id = data.reset_index() # reset_index()增加一列作为索引"index",作为行索引
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# 但考虑到数据集发生变化的话,行索引就会出现问题,我们就采用经纬度进行标记
housing_with_id["id"] = data["longitude"] * 1000 + data["latitude"]
temp_column = housing_with_id.pop("id")
housing_with_id.insert(0, "id", temp_column) # 这个就是用来在第0列插入一个索引
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
# 2.2 sklearn自带的方法
from sklearn.model_selection import train_test_split # sklearn中内置的数据集取样的方法
train_set, test_set = train_test_split(data, test_size=0.2, random_state=42)
# 3. 分层抽样
'''
前提是专家告诉我们,收入中位数是预测房价中位数非常重要的属性,故我们进行数据集的划分时要考虑到收入中位数
1)首先创建收入类别属性
2)然后针对收入分层的结果进行划分(每个收入类选择20%作为测试集)
'''
data["income_cat"] = np.ceil(data["median_income"] / 1.5) # ceil是进行数据取舍,以产生离散的分类
# 注:inplace的含义就是如果True则是在原DataFrame上面进行相关的操作;如果是False就是新形成一个DataFrame保留原本的DataFrame
data["income_cat"].where(data["income_cat"] < 5, 5.0, inplace=True) # where实际上可以看成一个for循环与if else块的合成
# print(data.ix[:, ["id", "income_cat"]])
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data, data["income_cat"]): # 这段代码就是分层获取训练集与测试集的索引
strat_train_set = data.loc[train_index]
strat_test_set = data.loc[test_index]
print(data["income_cat"].value_counts() / len(data)) # 查看分层的大小
print(strat_train_set["income_cat"].value_counts() / len(strat_train_set))
for set in (strat_train_set, strat_test_set):
set.drop(["income_cat"], axis=1, inplace=True)
housing = strat_train_set.copy()
# 画出坐标的散点图
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1) # 这个就是画散点图
# plt.show() # 在pycharm一定要加这个不然图像显示不了
# 画出每个点的房价情况,s:表示人口数量;c:表示该地区房价的中位数;cmap:用来定义颜色
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
s=housing["population"]/100, label="population",
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
)
plt.legend()
# plt.show()
# 4. 测算各个属性之间的关系系数
corr_matrix = housing.corr() # 初始化一个实例,corr()使用的是Pearson相关系数
print(corr_matrix["median_house_value"].sort_values(ascending=False)) # 调用median_house_value这个属性测算其他属性于这个属性之间的系数关系
from pandas.plotting import scatter_matrix # 这个案例上写错了,scatter_matrix表示画出attributes中个属性之间的关系
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
# plt.show()
# 5. 属性组合实验
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
# 6. 数据处理
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
# 6.1 数据清洗
median = housing["total_bedrooms"].median()
housing["total_bedrooms"].fillna(median)
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median") # 这个strategy有三种median/mean/most_frequent
housing_num = housing.drop("ocean_proximity", axis=1) # 删除ocean_proximity这个参数因为它本身不是一个数值
imputer.fit(housing_num) # 重点的方法,fit()方法将实例拟合到训练数据中
all_median = imputer.statistics_ # 所有数据的中间值
X = imputer.transform(housing_num) # 重点的方法,transform()将中值补充进入缺失值,这是个数组 | housing_tr = pd.DataFrame(X, columns=housing_num.columns)
# 6.2 处理文本
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder() # 这个是用来将字符串编码为数字(数字的选取都在字符串的长度-1)
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform(housing_cat) # 这里有两个fit,fit+transform就等于fit_transform
print(housing_cat_encoded)
# 6.3 处理文本(使用one_hot)
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
# 注意这里要求使用reshape(1, -1),原因在于fit_transform()需要传入一个2D的数组
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(1, -1)) # 这个结果是一个稀疏矩阵
print(housing_cat_1hot)
print(housing_cat_1hot.toarray())
from sklearn.preprocessing import LabelBinarizer
encoder = LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing_cat) # 一步转换
print(housing_cat_1hot)
# 6.4 自定义文本转化器
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
'''
仔细看了下这个类,其实就是用来增加三个或者两个自定义的属性,便于分析
主要是将步骤5中的组合属性用到了这个地方
'''
def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
population_per_household = X[:, population_ix] / X[:, household_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household,
bedrooms_per_room]
else:
# 用np.c_()将XX,YY拉平后的两个array按照列合并(此时 | random_line_split |
|
load.py | SV文件,返回一个相应的数据类型
'''
csv_path = os.path.join('./', housing_path, "housing.csv")
data = pd.read_csv(csv_path)
# print(data)
return data
data = load_housing_data()
# df.value_counts() 可以帮助我们统计每一列数据的分布情况
# df.describe() 可以帮助我们整体了解数据集的情况,包含count,mean,min,max,std(标准差)等等
print(data["ocean_proximity"].value_counts())
print(data.describe())
import matplotlib.pyplot as plt
# hist()画出每个数值属性的柱状图
# data.hist(bins=50, figsize=(20, 15))
# plt.show()
# 2. 划分测试集与训练集(随机取样)
import numpy as np
# 2.1 自己写的划分方法
def split_train_test(data, test_ratio):
'''
对数据划分训练集与测试集
:param data: 原始数据
:param test_ratio: 测试集的大小
:return: (训练集, 测试集)
'''
shuffled_indices = np.random.permutation(len(data)) # permutation(x)将x中的打乱重排
test_set_size = round(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
'''
这里考虑到了一个问题:
如果再次运行程序,就会产生不同的测试集,多次运行后,你就会得到整个数据集
解决方法:1)设置随机种子;2)保存训练集与测试集
但是这两种方法都是一种静态的方法,党员是数据发生变化的时候,这些方法将出现问题
所以最后采用对每个实例赋予识别码,判断实例是否应该放入测试集
新的测试集将包含新实例中的20%,但是不会有之前在训练集的实例
'''
import hashlib
'''
下面的这个方法是利用了哈希索引的思想
将行号放入哈希表中进行判断(重点是256*0.2这个理解),256的来源为2^8==256,因为这是取最后一个字节
'''
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
'''
本函数是用来通过某一列进行训练集与测试集的划分工作
:param data: 原始数据集
:param test_ratio: 测试集的大小
:param id_column: 划分依照的列的属性
'''
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash)) # 这个id_我还是没明白是怎么回事
return data.loc[~in_test_set], data.loc[in_test_set] # 细节使用~
housing_with_id = data.reset_index() # reset_index()增加一列作为索引"index",作为行索引
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# 但考虑到数据集发生变化的话,行索引就会出现问题,我们就采用经纬度进行标记
housing_with_id["id"] = data["longitude"] * 1000 + data["latitude"]
temp_column = housing_with_id.pop("id")
housing_with_id.insert(0, "id", temp_column) # 这个就是用来在第0列插入一个索引
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
# 2.2 sklearn自带的方法
from sklearn.model_selection import train_test_split # sklearn中内置的数据集取样的方法
train_set, test_set = train_test_split(data, test_size=0.2, random_state=42)
# 3. 分层抽样
'''
前提是专家告诉我们,收入中位数是预测房价中位数非常重要的属性,故我们进行数据集的划分时要考虑到收入中位数
1)首先创建收入类别属性
2)然后针对收入分层的结果进行划分(每个收入类选择20%作为测试集)
'''
data["income_cat"] = np.ceil(data["median_income"] / 1.5) # ceil是进行数据取舍,以产生离散的分类
# 注:inplace的含义就是如果True则是在原DataFrame上面进行相关的操作;如果是False就是新形成一个DataFrame保留原本的DataFrame
data["income_cat"].where(data["income_cat"] < 5, 5.0, inplace=True) # where实际上可以看成一个for循环与if else块的合成
# print(data.ix[:, ["id", "income_cat"]])
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data, data["income_cat"]): # 这段代码就是分层获取训练集与测试集的索引
strat_train_set = data.loc[train_index]
strat_test_set = data.loc[test_index]
print(data["income_cat"].value_counts() / len(data)) # 查看分层的大小
print(strat_train_set["income_cat"].value_counts() / len(strat_train_set))
for set in (strat_train_set, strat_test_set):
set.drop(["income_cat"], axis=1, inplace=True)
housing = strat_train_set.copy()
# 画出坐标的散点图
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1) # 这个就是画散点图
# plt.show() # 在pycharm一定要加这个不然图像显示不了
# 画出每个点的房价情况,s:表示人口数量;c:表示该地区房价的中位数;cmap:用来定义颜色
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
s=housing["population"]/100, label="population",
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
)
plt.legend()
# plt.show()
# 4. 测算各个属性之间的关系系数
corr_matrix = housing.corr() # 初始化一个实例,corr()使用的是Pearson相关系数
print(corr_matrix["median_house_value"].sort_values(ascending=False)) # 调用median_house_value这个属性测算其他属性于这个属性之间的系数关系
from pandas.plotting import scatter_matrix # 这个案例上写错了,scatter_matrix表示画出attributes中个属性之间的关系
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
# plt.show()
# 5. 属性组合实验
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
# 6. 数据处理
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
# 6.1 数据清洗
median = housing["total_bedrooms"].median()
housing["total_bedrooms"].fillna(median)
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median") # 这个strategy有三种median/mean/most_frequent
housing_num = housing.drop("ocean_proximity", axis=1) # 删除ocean_proximity这个参数因为它本身不是一个数值
imputer.fit(housing_num) # 重点的方法,fit()方法将实例拟合到训练数据中
all_median = imputer.statistics_ # 所有数据的中间值
X = imputer.transform(housing_num) # 重点的方法,transform()将中值补充进入缺失值,这是个数组
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
# 6.2 处理文本
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder() # 这个是用来将字符串编码为数字(数字的选取都在字符串的长度-1)
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform(housing_cat) # 这里有两个fit,fit+transform就等于fit_transform
print(housing_cat_encoded)
# 6.3 处理文本(使用one_hot)
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
# 注意这里要求使用reshape(1, -1),原因 | os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
# fetch_housing_data()
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
'''
利用pandas读取C | identifier_body |
|
load.py | ("id")
housing_with_id.insert(0, "id", temp_column) # 这个就是用来在第0列插入一个索引
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
# 2.2 sklearn自带的方法
from sklearn.model_selection import train_test_split # sklearn中内置的数据集取样的方法
train_set, test_set = train_test_split(data, test_size=0.2, random_state=42)
# 3. 分层抽样
'''
前提是专家告诉我们,收入中位数是预测房价中位数非常重要的属性,故我们进行数据集的划分时要考虑到收入中位数
1)首先创建收入类别属性
2)然后针对收入分层的结果进行划分(每个收入类选择20%作为测试集)
'''
data["income_cat"] = np.ceil(data["median_income"] / 1.5) # ceil是进行数据取舍,以产生离散的分类
# 注:inplace的含义就是如果True则是在原DataFrame上面进行相关的操作;如果是False就是新形成一个DataFrame保留原本的DataFrame
data["income_cat"].where(data["income_cat"] < 5, 5.0, inplace=True) # where实际上可以看成一个for循环与if else块的合成
# print(data.ix[:, ["id", "income_cat"]])
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data, data["income_cat"]): # 这段代码就是分层获取训练集与测试集的索引
strat_train_set = data.loc[train_index]
strat_test_set = data.loc[test_index]
print(data["income_cat"].value_counts() / len(data)) # 查看分层的大小
print(strat_train_set["income_cat"].value_counts() / len(strat_train_set))
for set in (strat_train_set, strat_test_set):
set.drop(["income_cat"], axis=1, inplace=True)
housing = strat_train_set.copy()
# 画出坐标的散点图
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1) # 这个就是画散点图
# plt.show() # 在pycharm一定要加这个不然图像显示不了
# 画出每个点的房价情况,s:表示人口数量;c:表示该地区房价的中位数;cmap:用来定义颜色
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
s=housing["population"]/100, label="population",
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
)
plt.legend()
# plt.show()
# 4. 测算各个属性之间的关系系数
corr_matrix = housing.corr() # 初始化一个实例,corr()使用的是Pearson相关系数
print(corr_matrix["median_house_value"].sort_values(ascending=False)) # 调用median_house_value这个属性测算其他属性于这个属性之间的系数关系
from pandas.plotting import scatter_matrix # 这个案例上写错了,scatter_matrix表示画出attributes中个属性之间的关系
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
# plt.show()
# 5. 属性组合实验
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
# 6. 数据处理
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
# 6.1 数据清洗
median = housing["total_bedrooms"].median()
housing["total_bedrooms"].fillna(median)
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median") # 这个strategy有三种median/mean/most_frequent
housing_num = housing.drop("ocean_proximity", axis=1) # 删除ocean_proximity这个参数因为它本身不是一个数值
imputer.fit(housing_num) # 重点的方法,fit()方法将实例拟合到训练数据中
all_median = imputer.statistics_ # 所有数据的中间值
X = imputer.transform(housing_num) # 重点的方法,transform()将中值补充进入缺失值,这是个数组
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
# 6.2 处理文本
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder() # 这个是用来将字符串编码为数字(数字的选取都在字符串的长度-1)
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform(housing_cat) # 这里有两个fit,fit+transform就等于fit_transform
print(housing_cat_encoded)
# 6.3 处理文本(使用one_hot)
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
# 注意这里要求使用reshape(1, -1),原因在于fit_transform()需要传入一个2D的数组
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(1, -1)) # 这个结果是一个稀疏矩阵
print(housing_cat_1hot)
print(housing_cat_1hot.toarray())
from sklearn.preprocessing import LabelBinarizer
encoder = LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing_cat) # 一步转换
print(housing_cat_1hot)
# 6.4 自定义文本转化器
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
'''
仔细看了下这个类,其实就是用来增加三个或者两个自定义的属性,便于分析
主要是将步骤5中的组合属性用到了这个地方
'''
def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
population_per_household = X[:, population_ix] / X[:, household_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household,
bedrooms_per_room]
else:
# 用np.c_()将XX,YY拉平后的两个array按照列合并(此时是n*2的举证,有n个样本点,每个样本点有横纵2维),然后调用分类器集合的decision_function函数获得样本到超平面的距离。Z是一个n*1的矩阵(列向量),记录了n个样本距离超平面的距离。
return np.c_[X, rooms_per_household, population_per_household]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
# 6.5 缩放处理(标准化)
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
'''
这里使用了相当重要的pipeline这个方法
pipeline就是一个流水线的作业方式
'''
num_pipeline = Pipeline([
('imputer', Imputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
class DataFrameSelector(BaseEstimator, TransformerMixin):
'''
sklearn没有工具来处理Pandas DataFrame,所以要写一个
简单的自定义转换器来做这个工作
'''
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
class MyLabelBinarizer(TransformerMixin): # 如果不写这个转换公式就会报错,这个切记
def __init__(self, *args, **kwargs):
self.encoder = LabelBinarizer(*args, **kwargs)
def fit(self, x, y=0):
self.encoder.fit(x)
return self
def transform(self, x, y=0):
return self.encoder.transform(x)
from sklearn.pipeline import FeatureUnion
'''
*****利用FeatureUnion将多个pipeline合在一起,
*****充分利用流水线进行相关的操作
'''
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs)),
('imputer', Imputer(strategy="median")),
('attribs_adder', CombinedAttributesAdde | r() | identifier_name |
|
load.py |
'''
前提是专家告诉我们,收入中位数是预测房价中位数非常重要的属性,故我们进行数据集的划分时要考虑到收入中位数
1)首先创建收入类别属性
2)然后针对收入分层的结果进行划分(每个收入类选择20%作为测试集)
'''
data["income_cat"] = np.ceil(data["median_income"] / 1.5) # ceil是进行数据取舍,以产生离散的分类
# 注:inplace的含义就是如果True则是在原DataFrame上面进行相关的操作;如果是False就是新形成一个DataFrame保留原本的DataFrame
data["income_cat"].where(data["income_cat"] < 5, 5.0, inplace=True) # where实际上可以看成一个for循环与if else块的合成
# print(data.ix[:, ["id", "income_cat"]])
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data, data["income_cat"]): # 这段代码就是分层获取训练集与测试集的索引
strat_train_set = data.loc[train_index]
strat_test_set = data.loc[test_index]
print(data["income_cat"].value_counts() / len(data)) # 查看分层的大小
print(strat_train_set["income_cat"].value_counts() / len(strat_train_set))
for set in (strat_train_set, strat_test_set):
set.drop(["income_cat"], axis=1, inplace=True)
housing = strat_train_set.copy()
# 画出坐标的散点图
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1) # 这个就是画散点图
# plt.show() # 在pycharm一定要加这个不然图像显示不了
# 画出每个点的房价情况,s:表示人口数量;c:表示该地区房价的中位数;cmap:用来定义颜色
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
s=housing["population"]/100, label="population",
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
)
plt.legend()
# plt.show()
# 4. 测算各个属性之间的关系系数
corr_matrix = housing.corr() # 初始化一个实例,corr()使用的是Pearson相关系数
print(corr_matrix["median_house_value"].sort_values(ascending=False)) # 调用median_house_value这个属性测算其他属性于这个属性之间的系数关系
from pandas.plotting import scatter_matrix # 这个案例上写错了,scatter_matrix表示画出attributes中个属性之间的关系
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# plt.show()
housing.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
# plt.show()
# 5. 属性组合实验
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
corr_matrix = housing.corr()
print(corr_matrix["median_house_value"].sort_values(ascending=False))
# 6. 数据处理
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
# 6.1 数据清洗
median = housing["total_bedrooms"].median()
housing["total_bedrooms"].fillna(median)
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median") # 这个strategy有三种median/mean/most_frequent
housing_num = housing.drop("ocean_proximity", axis=1) # 删除ocean_proximity这个参数因为它本身不是一个数值
imputer.fit(housing_num) # 重点的方法,fit()方法将实例拟合到训练数据中
all_median = imputer.statistics_ # 所有数据的中间值
X = imputer.transform(housing_num) # 重点的方法,transform()将中值补充进入缺失值,这是个数组
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
# 6.2 处理文本
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder() # 这个是用来将字符串编码为数字(数字的选取都在字符串的长度-1)
housing_cat = housing["ocean_proximity"]
housing_cat_encoded = encoder.fit_transform(housing_cat) # 这里有两个fit,fit+transform就等于fit_transform
print(housing_cat_encoded)
# 6.3 处理文本(使用one_hot)
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
# 注意这里要求使用reshape(1, -1),原因在于fit_transform()需要传入一个2D的数组
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(1, -1)) # 这个结果是一个稀疏矩阵
print(housing_cat_1hot)
print(housing_cat_1hot.toarray())
from sklearn.preprocessing import LabelBinarizer
encoder = LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing_cat) # 一步转换
print(housing_cat_1hot)
# 6.4 自定义文本转化器
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
'''
仔细看了下这个类,其实就是用来增加三个或者两个自定义的属性,便于分析
主要是将步骤5中的组合属性用到了这个地方
'''
def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # nothing else to do
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
population_per_household = X[:, population_ix] / X[:, household_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household,
bedrooms_per_room]
else:
# 用np.c_()将XX,YY拉平后的两个array按照列合并(此时是n*2的举证,有n个样本点,每个样本点有横纵2维),然后调用分类器集合的decision_function函数获得样本到超平面的距离。Z是一个n*1的矩阵(列向量),记录了n个样本距离超平面的距离。
return np.c_[X, rooms_per_household, population_per_household]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
# 6.5 缩放处理(标准化)
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
'''
这里使用了相当重要的pipeline这个方法
pipeline就是一个流水线的作业方式
'''
num_pipeline = Pipeline([
('imputer', Imputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
class DataFrameSelector(BaseEstimator, TransformerMixin):
'''
sklearn没有工具来处理Pandas DataFrame,所以要写一个
简单的自定义转换器来做这个工作
'''
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
class MyLabelBinarizer(TransformerMixin): # 如果不写这个转换公式就会报错,这个切记
def __init__(self, *args, **kwargs):
self.encoder = LabelBinarizer(*args, **kwargs)
def fit(self, x, y=0):
self.encoder.fit(x)
return self
def transform(self, x, y=0):
return self.encoder.transform(x)
from sklearn.pipeline import FeatureUnion
'''
*****利用FeatureUnion将多个pipeline合在一起,
*****充分利用流水线进行相关的操作
'''
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs)),
('imputer', Imputer(strategy="median")),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler())
])
cat_pipeline = Pipeline([
('selector', DataFrameSelector(cat_attribs)),
('label_binarizer', MyLabelBinarizer()),
])
full_pipeline = FeatureUnion(transformer_list=[
("num_pipeline", num_pipeline),
("cat_pipeline", cat_pipeline),
] | )
housing_prepared = full_pipeline.fit_transform(X=housing)
housing_temp = pd.DataFrame(housing_prepared) # 因为这个columns已经更新了不能使用之前的columns
print(housing_prepared)
# 7. 线性 | conditional_block |
|
targets.go | NamedOrString(s *scope, name string, obj pyObject, anon func(core.BuildInput), named func(string, core.BuildInput), systemAllowed, tool bool) {
if obj == nil {
return
}
if str, ok := asString(obj); ok {
if bi := parseBuildInput(s, str, name, systemAllowed, tool); bi != nil {
anon(bi)
}
return
}
addMaybeNamed(s, name, obj, anon, named, systemAllowed, tool)
}
// addMaybeNamedOutput adds outputs to a target, possibly in a named group
func addMaybeNamedOutput(s *scope, name string, obj pyObject, anon func(string), named func(string, string), t *core.BuildTarget, optional bool) {
if obj == nil {
return
}
if l, ok := asList(obj); ok {
for _, li := range l {
if li != None {
out, ok := li.(pyString)
s.Assert(ok, "outs must be strings")
anon(string(out))
if !optional || !strings.HasPrefix(string(out), "*") {
s.pkg.MustRegisterOutput(s.state, string(out), t)
}
}
}
} else if d, ok := asDict(obj); ok {
s.Assert(named != nil, "%s cannot be given as a dict", name)
for k, v := range d {
l, ok := asList(v)
s.Assert(ok, "Values must be lists of strings")
for _, li := range l {
if li != None {
out, ok := li.(pyString)
s.Assert(ok, "outs must be strings")
named(k, string(out))
if !optional || !strings.HasPrefix(string(out), "*") {
s.pkg.MustRegisterOutput(s.state, string(out), t)
}
}
}
}
} else if obj != None {
s.Assert(false, "Argument %s must be a list or dict, not %s", name, obj.Type())
}
}
// addMaybeNamedSecret adds outputs to a target, possibly in a named group
func addMaybeNamedSecret(s *scope, name string, obj pyObject, anon func(string), named func(string, string), t *core.BuildTarget, optional bool) {
validateSecret := func(secret string) {
s.NAssert(strings.HasPrefix(secret, "//"),
"Secret %s of %s cannot be a build label", secret, t.Label.Name)
s.Assert(filepath.IsAbs(secret) || strings.HasPrefix(secret, "~"),
"Secret '%s' of %s is not an absolute path", secret, t.Label.Name)
}
if obj == nil {
return
}
if l, ok := asList(obj); ok {
for _, li := range l {
if li != None {
out, ok := li.(pyString)
s.Assert(ok, "secrets must be strings")
validateSecret(string(out))
anon(string(out))
}
}
} else if d, ok := asDict(obj); ok {
s.Assert(named != nil, "%s cannot be given as a dict", name)
for k, v := range d {
l, ok := asList(v)
s.Assert(ok, "Values must be lists of strings")
for _, li := range l {
if li != None {
out, ok := li.(pyString)
s.Assert(ok, "outs must be strings")
validateSecret(string(out))
named(k, string(out))
}
}
}
} else if obj != None {
s.Assert(false, "Argument %s must be a list or dict, not %s", name, obj.Type())
}
}
// addDependencies adds dependencies to a target, which may or may not be exported.
func addDependencies(s *scope, name string, obj pyObject, target *core.BuildTarget, exported, internal bool) {
addStrings(s, name, obj, func(str string) {
if s.state.Config.Bazel.Compatibility && !core.LooksLikeABuildLabel(str) && !strings.HasPrefix(str, "@") {
// *sigh*... Bazel seems to allow an implicit : on the start of dependencies
str = ":" + str
}
target.AddMaybeExportedDependency(checkLabel(s, s.parseLabelInPackage(str, s.pkg)), exported, false, internal)
})
}
// addStrings adds an arbitrary set of strings to the target (e.g. labels etc).
func addStrings(s *scope, name string, obj pyObject, f func(string)) {
if obj != nil && obj != None {
l, ok := asList(obj)
if !ok {
s.Error("Argument %s must be a list, not %s", name, obj.Type())
}
for _, li := range l {
str, ok := li.(pyString)
if !ok && li != None {
s.Error("%s must be strings", name)
}
if str != "" && li != None {
f(string(str))
}
}
}
}
// addProvides adds a set of provides to the target, which is a dict of string -> label
func addProvides(s *scope, name string, obj pyObject, t *core.BuildTarget) {
if obj != nil && obj != None {
d, ok := asDict(obj)
s.Assert(ok, "Argument %s must be a dict, not %s, %v", name, obj.Type(), obj)
for k, v := range d {
str, ok := v.(pyString)
s.Assert(ok, "%s values must be strings", name)
t.AddProvide(k, checkLabel(s, s.parseLabelInPackage(string(str), s.pkg)))
}
}
}
// parseVisibility converts a visibility string to a build label.
// Mostly they are just build labels but other things are allowed too (e.g. "PUBLIC").
func parseVisibility(s *scope, vis string) core.BuildLabel {
if vis == "PUBLIC" || (s.state.Config.Bazel.Compatibility && vis == "//visibility:public") {
return core.WholeGraph[0]
}
l := s.parseLabelInPackage(vis, s.pkg)
if s.state.Config.Bazel.Compatibility {
// Bazel has a couple of special aliases for this stuff.
if l.Name == "__pkg__" {
l.Name = "all"
} else if l.Name == "__subpackages__" {
l.Name = "..."
}
}
return l
}
func parseBuildInput(s *scope, in pyObject, name string, systemAllowed, tool bool) core.BuildInput {
src, ok := in.(pyString)
if !ok {
s.Assert(in == None, "Items in %s must be strings", name)
return nil
}
return parseSource(s, string(src), systemAllowed, tool)
}
// parseSource parses an incoming source label as either a file or a build label.
// Identifies if the file is owned by this package and returns an error if not.
func parseSource(s *scope, src string, systemAllowed, tool bool) core.BuildInput {
if core.LooksLikeABuildLabel(src) {
pkg := s.pkg
if tool && s.pkg.Subrepo != nil && s.pkg.Subrepo.IsCrossCompile {
// Tools should be parsed with the host OS and arch
pkg = &core.Package{
Name: pkg.Name,
}
}
label := s.parseAnnotatedLabelInPackage(src, pkg)
if l, ok := label.Label(); ok {
checkLabel(s, l)
}
return label
}
s.Assert(src != "", "Empty source path")
s.Assert(!strings.Contains(src, "../"), "%s is an invalid path; build target paths can't contain ../", src)
if filepath.IsAbs(src) || src[0] == '~' {
s.Assert(systemAllowed, "%s is an absolute path; that's not allowed", src)
return core.SystemFileLabel{Path: strings.TrimRight(src, "/")}
} else if tool {
// "go" as a source is interpreted as a file, as a tool it's interpreted as something on the PATH.
return core.SystemPathLabel{Name: src, Path: s.state.Config.Path()}
}
src = strings.TrimPrefix(src, "./")
return core.NewFileLabel(src, s.pkg)
}
// checkLabel checks that the given build label is not a pseudo-label.
// These are disallowed in (nearly) all contexts.
func checkLabel(s *scope, label core.BuildLabel) core.BuildLabel {
s.NAssert(label.IsAllTargets(), ":all labels are not permitted here")
s.NAssert(label.IsAllSubpackages(), "... labels are not permitted here")
return label
}
// callbackFunction extracts a pre- or post-build function for a target.
func callbackFunction(s *scope, name string, obj pyObject, requiredArguments int, arguments string) *pyFunc {
if obj != nil && obj != None {
f := obj.(*pyFunc)
s.Assert(len(f.args) == requiredArguments, "%s callbacks must take exactly %d %s (%s takes %d)", name, requiredArguments, arguments, f.name, len(f.args))
return f
}
return nil
}
// A preBuildFunction implements the core.PreBuildFunction interface
type preBuildFunction struct {
f *pyFunc
s *scope
}
func (f *preBuildFunction) | Call | identifier_name |
|
targets.go | target.Test.Flakiness = 1
} else {
target.Test.Flakiness = uint8(i)
target.AddLabel("flaky")
}
}
} else {
target.Test.Flakiness = 1
}
if testCmd != nil && testCmd != None {
target.Test.Command, target.Test.Commands = decodeCommands(s, args[testCMDBuildRuleArgIdx])
}
target.Test.Timeout = sizeAndTimeout(s, size, args[testTimeoutBuildRuleArgIdx], s.state.Config.Test.Timeout)
target.Test.Sandbox = isTruthy(testSandboxBuildRuleArgIdx)
target.Test.NoOutput = isTruthy(noTestOutputBuildRuleArgIdx)
}
if err := validateSandbox(s.state, target); err != nil {
log.Fatal(err)
}
if s.state.Config.Build.Config == "dbg" {
target.Debug = new(core.DebugFields)
target.Debug.Command, _ = decodeCommands(s, args[debugCMDBuildRuleArgIdx])
}
return target
}
// validateSandbox ensures that the target isn't opting out of the build/test sandbox when it's not allowed to
func validateSandbox(state *core.BuildState, target *core.BuildTarget) error {
if target.IsFilegroup || len(state.Config.Sandbox.ExcludeableTargets) == 0 {
return nil
}
if !target.IsRemoteFile { | if target.Label.PackageName == "_please" {
return nil
}
for _, whitelist := range state.Config.Sandbox.ExcludeableTargets {
if whitelist.Matches(target.Label) {
return nil
}
}
for _, dir := range state.Config.Parse.ExperimentalDir {
if strings.HasPrefix(target.Label.PackageName, dir) {
return nil
}
}
return fmt.Errorf("%v is not whitelisted to opt out of the sandbox", target)
}
// sizeAndTimeout handles the size and build/test timeout arguments.
func sizeAndTimeout(s *scope, size *core.Size, timeout pyObject, defaultTimeout cli.Duration) time.Duration {
switch t := timeout.(type) {
case pyInt:
if t > 0 {
return time.Duration(t) * time.Second
}
case pyString:
return time.Duration(mustSize(s, string(t)).Timeout)
}
if size != nil {
return time.Duration(size.Timeout)
}
return time.Duration(defaultTimeout)
}
// mustSize looks up a size by name. It panics if it cannot be found.
func mustSize(s *scope, name string) *core.Size {
size, present := s.state.Config.Size[name]
s.Assert(present, "Unknown size %s", name)
return size
}
// decodeCommands takes a Python object and returns it as a string and a map; only one will be set.
func decodeCommands(s *scope, obj pyObject) (string, map[string]string) {
if obj == nil || obj == None {
return "", nil
} else if cmd, ok := obj.(pyString); ok {
return strings.TrimSpace(string(cmd)), nil
}
cmds, ok := asDict(obj)
s.Assert(ok, "Unknown type for command [%s]", obj.Type())
// Have to convert all the keys too
m := make(map[string]string, len(cmds))
for k, v := range cmds {
if v != None {
sv, ok := v.(pyString)
s.Assert(ok, "Unknown type for command")
m[k] = strings.TrimSpace(string(sv))
}
}
return "", m
}
// populateTarget sets the assorted attributes on a build target.
func populateTarget(s *scope, t *core.BuildTarget, args []pyObject) {
if t.IsRemoteFile {
for _, url := range mustList(args[urlsBuildRuleArgIdx]) {
t.AddSource(core.URLLabel(url.(pyString)))
}
} else if t.IsTextFile {
t.FileContent = args[fileContentArgIdx].(pyString).String()
}
addMaybeNamed(s, "srcs", args[srcsBuildRuleArgIdx], t.AddSource, t.AddNamedSource, false, false)
addMaybeNamedOrString(s, "tools", args[toolsBuildRuleArgIdx], t.AddTool, t.AddNamedTool, true, true)
addMaybeNamed(s, "system_srcs", args[systemSrcsBuildRuleArgIdx], t.AddSource, nil, true, false)
addMaybeNamed(s, "data", args[dataBuildRuleArgIdx], t.AddDatum, t.AddNamedDatum, false, false)
addMaybeNamedOutput(s, "outs", args[outsBuildRuleArgIdx], t.AddOutput, t.AddNamedOutput, t, false)
addMaybeNamedOutput(s, "optional_outs", args[optionalOutsBuildRuleArgIdx], t.AddOptionalOutput, nil, t, true)
addDependencies(s, "deps", args[depsBuildRuleArgIdx], t, false, false)
addDependencies(s, "exported_deps", args[exportedDepsBuildRuleArgIdx], t, true, false)
addDependencies(s, "internal_deps", args[internalDepsBuildRuleArgIdx], t, false, true)
addStrings(s, "labels", args[labelsBuildRuleArgIdx], t.AddLabel)
addStrings(s, "hashes", args[hashesBuildRuleArgIdx], t.AddHash)
addStrings(s, "licences", args[licencesBuildRuleArgIdx], t.AddLicence)
addStrings(s, "requires", args[requiresBuildRuleArgIdx], t.AddRequire)
if vis, ok := asList(args[visibilityBuildRuleArgIdx]); ok && len(vis) != 0 {
if v, ok := vis[0].(pyString); ok && v == "PUBLIC" {
t.Visibility = core.WholeGraph
} else {
addStrings(s, "visibility", args[visibilityBuildRuleArgIdx], func(str string) {
t.Visibility = append(t.Visibility, parseVisibility(s, str))
})
}
}
addEntryPoints(s, args[entryPointsArgIdx], t)
addEnv(s, args[envArgIdx], t)
addMaybeNamedSecret(s, "secrets", args[secretsBuildRuleArgIdx], t.AddSecret, t.AddNamedSecret, t, true)
addProvides(s, "provides", args[providesBuildRuleArgIdx], t)
if f := callbackFunction(s, "pre_build", args[preBuildBuildRuleArgIdx], 1, "argument"); f != nil {
t.PreBuildFunction = &preBuildFunction{f: f, s: s}
}
if f := callbackFunction(s, "post_build", args[postBuildBuildRuleArgIdx], 2, "arguments"); f != nil {
t.PostBuildFunction = &postBuildFunction{f: f, s: s}
}
if t.IsTest() {
addMaybeNamedOrString(s, "test_tools", args[testToolsBuildRuleArgIdx], t.AddTestTool, t.AddNamedTestTool, true, true)
addMaybeNamedOutput(s, "test_outputs", args[testOutputsBuildRuleArgIdx], t.AddTestOutput, nil, t, false)
}
if t.Debug != nil {
addMaybeNamed(s, "debug_data", args[debugDataBuildRuleArgIdx], t.AddDebugDatum, t.AddDebugNamedDatum, false, false)
addMaybeNamedOrString(s, "debug_tools", args[debugToolsBuildRuleArgIdx], t.AddDebugTool, t.AddNamedDebugTool, true, true)
}
}
// addEntryPoints adds entry points to a target
func addEntryPoints(s *scope, arg pyObject, target *core.BuildTarget) {
entryPointsPy, ok := asDict(arg)
s.Assert(ok, "entry_points must be a dict")
entryPoints := make(map[string]string, len(entryPointsPy))
for name, entryPointPy := range entryPointsPy {
entryPoint, ok := entryPointPy.(pyString)
s.Assert(ok, "Values of entry_points must be strings, found %v at key %v", entryPointPy.Type(), name)
s.Assert(target.NamedOutputs(entryPoint.String()) == nil, "Entry points can't have the same name as a named output")
if target.IsFilegroup {
s.Assert(target.NamedSources[entryPoint.String()] == nil, "Entry points can't have the same name as a named source on a filegroup")
}
entryPoints[name] = string(entryPoint)
}
target.EntryPoints = entryPoints
}
// addEnv adds entry points to a target
func addEnv(s *scope, arg pyObject, target *core.BuildTarget) {
envPy, ok := asDict(arg)
s.Assert(ok, "env must be a dict")
env := make(map[string]string, len(envPy))
for name, val := range envPy {
v, ok := val.(pyString)
s.Assert(ok, "Values of env must be strings, found %v at key %v", val.Type(), name)
env[name] = string(v)
}
target.Env = env
}
// addMaybeNamed adds inputs to a target, possibly in named groups.
func addMaybeNamed(s *scope, name string, obj pyObject, anon func(core.BuildInput), named func(string, core.BuildInput), systemAllowed, tool bool) {
if obj == nil {
return
}
if l, ok := asList(obj); ok {
for _, li := | if target.Sandbox && (target.Test == nil || target.Test.Sandbox) {
return nil
}
}
| random_line_split |
targets.go | target.Test.Flakiness = 1
} else {
target.Test.Flakiness = uint8(i)
target.AddLabel("flaky")
}
}
} else {
target.Test.Flakiness = 1
}
if testCmd != nil && testCmd != None {
target.Test.Command, target.Test.Commands = decodeCommands(s, args[testCMDBuildRuleArgIdx])
}
target.Test.Timeout = sizeAndTimeout(s, size, args[testTimeoutBuildRuleArgIdx], s.state.Config.Test.Timeout)
target.Test.Sandbox = isTruthy(testSandboxBuildRuleArgIdx)
target.Test.NoOutput = isTruthy(noTestOutputBuildRuleArgIdx)
}
if err := validateSandbox(s.state, target); err != nil {
log.Fatal(err)
}
if s.state.Config.Build.Config == "dbg" {
target.Debug = new(core.DebugFields)
target.Debug.Command, _ = decodeCommands(s, args[debugCMDBuildRuleArgIdx])
}
return target
}
// validateSandbox ensures that the target isn't opting out of the build/test sandbox when it's not allowed to
func validateSandbox(state *core.BuildState, target *core.BuildTarget) error {
if target.IsFilegroup || len(state.Config.Sandbox.ExcludeableTargets) == 0 {
return nil
}
if !target.IsRemoteFile {
if target.Sandbox && (target.Test == nil || target.Test.Sandbox) {
return nil
}
}
if target.Label.PackageName == "_please" {
return nil
}
for _, whitelist := range state.Config.Sandbox.ExcludeableTargets {
if whitelist.Matches(target.Label) {
return nil
}
}
for _, dir := range state.Config.Parse.ExperimentalDir {
if strings.HasPrefix(target.Label.PackageName, dir) {
return nil
}
}
return fmt.Errorf("%v is not whitelisted to opt out of the sandbox", target)
}
// sizeAndTimeout handles the size and build/test timeout arguments.
func sizeAndTimeout(s *scope, size *core.Size, timeout pyObject, defaultTimeout cli.Duration) time.Duration {
switch t := timeout.(type) {
case pyInt:
if t > 0 {
return time.Duration(t) * time.Second
}
case pyString:
return time.Duration(mustSize(s, string(t)).Timeout)
}
if size != nil {
return time.Duration(size.Timeout)
}
return time.Duration(defaultTimeout)
}
// mustSize looks up a size by name. It panics if it cannot be found.
func mustSize(s *scope, name string) *core.Size {
size, present := s.state.Config.Size[name]
s.Assert(present, "Unknown size %s", name)
return size
}
// decodeCommands takes a Python object and returns it as a string and a map; only one will be set.
func decodeCommands(s *scope, obj pyObject) (string, map[string]string) {
if obj == nil || obj == None {
return "", nil
} else if cmd, ok := obj.(pyString); ok {
return strings.TrimSpace(string(cmd)), nil
}
cmds, ok := asDict(obj)
s.Assert(ok, "Unknown type for command [%s]", obj.Type())
// Have to convert all the keys too
m := make(map[string]string, len(cmds))
for k, v := range cmds {
if v != None {
sv, ok := v.(pyString)
s.Assert(ok, "Unknown type for command")
m[k] = strings.TrimSpace(string(sv))
}
}
return "", m
}
// populateTarget sets the assorted attributes on a build target.
func populateTarget(s *scope, t *core.BuildTarget, args []pyObject) | addStrings(s, "requires", args[requiresBuildRuleArgIdx], t.AddRequire)
if vis, ok := asList(args[visibilityBuildRuleArgIdx]); ok && len(vis) != 0 {
if v, ok := vis[0].(pyString); ok && v == "PUBLIC" {
t.Visibility = core.WholeGraph
} else {
addStrings(s, "visibility", args[visibilityBuildRuleArgIdx], func(str string) {
t.Visibility = append(t.Visibility, parseVisibility(s, str))
})
}
}
addEntryPoints(s, args[entryPointsArgIdx], t)
addEnv(s, args[envArgIdx], t)
addMaybeNamedSecret(s, "secrets", args[secretsBuildRuleArgIdx], t.AddSecret, t.AddNamedSecret, t, true)
addProvides(s, "provides", args[providesBuildRuleArgIdx], t)
if f := callbackFunction(s, "pre_build", args[preBuildBuildRuleArgIdx], 1, "argument"); f != nil {
t.PreBuildFunction = &preBuildFunction{f: f, s: s}
}
if f := callbackFunction(s, "post_build", args[postBuildBuildRuleArgIdx], 2, "arguments"); f != nil {
t.PostBuildFunction = &postBuildFunction{f: f, s: s}
}
if t.IsTest() {
addMaybeNamedOrString(s, "test_tools", args[testToolsBuildRuleArgIdx], t.AddTestTool, t.AddNamedTestTool, true, true)
addMaybeNamedOutput(s, "test_outputs", args[testOutputsBuildRuleArgIdx], t.AddTestOutput, nil, t, false)
}
if t.Debug != nil {
addMaybeNamed(s, "debug_data", args[debugDataBuildRuleArgIdx], t.AddDebugDatum, t.AddDebugNamedDatum, false, false)
addMaybeNamedOrString(s, "debug_tools", args[debugToolsBuildRuleArgIdx], t.AddDebugTool, t.AddNamedDebugTool, true, true)
}
}
// addEntryPoints adds entry points to a target
func addEntryPoints(s *scope, arg pyObject, target *core.BuildTarget) {
entryPointsPy, ok := asDict(arg)
s.Assert(ok, "entry_points must be a dict")
entryPoints := make(map[string]string, len(entryPointsPy))
for name, entryPointPy := range entryPointsPy {
entryPoint, ok := entryPointPy.(pyString)
s.Assert(ok, "Values of entry_points must be strings, found %v at key %v", entryPointPy.Type(), name)
s.Assert(target.NamedOutputs(entryPoint.String()) == nil, "Entry points can't have the same name as a named output")
if target.IsFilegroup {
s.Assert(target.NamedSources[entryPoint.String()] == nil, "Entry points can't have the same name as a named source on a filegroup")
}
entryPoints[name] = string(entryPoint)
}
target.EntryPoints = entryPoints
}
// addEnv adds entry points to a target
func addEnv(s *scope, arg pyObject, target *core.BuildTarget) {
envPy, ok := asDict(arg)
s.Assert(ok, "env must be a dict")
env := make(map[string]string, len(envPy))
for name, val := range envPy {
v, ok := val.(pyString)
s.Assert(ok, "Values of env must be strings, found %v at key %v", val.Type(), name)
env[name] = string(v)
}
target.Env = env
}
// addMaybeNamed adds inputs to a target, possibly in named groups.
func addMaybeNamed(s *scope, name string, obj pyObject, anon func(core.BuildInput), named func(string, core.BuildInput), systemAllowed, tool bool) {
if obj == nil {
return
}
if l, ok := asList(obj); ok {
for _, li := | {
if t.IsRemoteFile {
for _, url := range mustList(args[urlsBuildRuleArgIdx]) {
t.AddSource(core.URLLabel(url.(pyString)))
}
} else if t.IsTextFile {
t.FileContent = args[fileContentArgIdx].(pyString).String()
}
addMaybeNamed(s, "srcs", args[srcsBuildRuleArgIdx], t.AddSource, t.AddNamedSource, false, false)
addMaybeNamedOrString(s, "tools", args[toolsBuildRuleArgIdx], t.AddTool, t.AddNamedTool, true, true)
addMaybeNamed(s, "system_srcs", args[systemSrcsBuildRuleArgIdx], t.AddSource, nil, true, false)
addMaybeNamed(s, "data", args[dataBuildRuleArgIdx], t.AddDatum, t.AddNamedDatum, false, false)
addMaybeNamedOutput(s, "outs", args[outsBuildRuleArgIdx], t.AddOutput, t.AddNamedOutput, t, false)
addMaybeNamedOutput(s, "optional_outs", args[optionalOutsBuildRuleArgIdx], t.AddOptionalOutput, nil, t, true)
addDependencies(s, "deps", args[depsBuildRuleArgIdx], t, false, false)
addDependencies(s, "exported_deps", args[exportedDepsBuildRuleArgIdx], t, true, false)
addDependencies(s, "internal_deps", args[internalDepsBuildRuleArgIdx], t, false, true)
addStrings(s, "labels", args[labelsBuildRuleArgIdx], t.AddLabel)
addStrings(s, "hashes", args[hashesBuildRuleArgIdx], t.AddHash)
addStrings(s, "licences", args[licencesBuildRuleArgIdx], t.AddLicence) | identifier_body |
targets.go | target.Test.Flakiness = 1
} else {
target.Test.Flakiness = uint8(i)
target.AddLabel("flaky")
}
}
} else {
target.Test.Flakiness = 1
}
if testCmd != nil && testCmd != None {
target.Test.Command, target.Test.Commands = decodeCommands(s, args[testCMDBuildRuleArgIdx])
}
target.Test.Timeout = sizeAndTimeout(s, size, args[testTimeoutBuildRuleArgIdx], s.state.Config.Test.Timeout)
target.Test.Sandbox = isTruthy(testSandboxBuildRuleArgIdx)
target.Test.NoOutput = isTruthy(noTestOutputBuildRuleArgIdx)
}
if err := validateSandbox(s.state, target); err != nil {
log.Fatal(err)
}
if s.state.Config.Build.Config == "dbg" {
target.Debug = new(core.DebugFields)
target.Debug.Command, _ = decodeCommands(s, args[debugCMDBuildRuleArgIdx])
}
return target
}
// validateSandbox ensures that the target isn't opting out of the build/test sandbox when it's not allowed to
func validateSandbox(state *core.BuildState, target *core.BuildTarget) error {
if target.IsFilegroup || len(state.Config.Sandbox.ExcludeableTargets) == 0 {
return nil
}
if !target.IsRemoteFile {
if target.Sandbox && (target.Test == nil || target.Test.Sandbox) {
return nil
}
}
if target.Label.PackageName == "_please" {
return nil
}
for _, whitelist := range state.Config.Sandbox.ExcludeableTargets {
if whitelist.Matches(target.Label) {
return nil
}
}
for _, dir := range state.Config.Parse.ExperimentalDir {
if strings.HasPrefix(target.Label.PackageName, dir) {
return nil
}
}
return fmt.Errorf("%v is not whitelisted to opt out of the sandbox", target)
}
// sizeAndTimeout handles the size and build/test timeout arguments.
func sizeAndTimeout(s *scope, size *core.Size, timeout pyObject, defaultTimeout cli.Duration) time.Duration {
switch t := timeout.(type) {
case pyInt:
if t > 0 {
return time.Duration(t) * time.Second
}
case pyString:
return time.Duration(mustSize(s, string(t)).Timeout)
}
if size != nil {
return time.Duration(size.Timeout)
}
return time.Duration(defaultTimeout)
}
// mustSize looks up a size by name. It panics if it cannot be found.
func mustSize(s *scope, name string) *core.Size {
size, present := s.state.Config.Size[name]
s.Assert(present, "Unknown size %s", name)
return size
}
// decodeCommands takes a Python object and returns it as a string and a map; only one will be set.
func decodeCommands(s *scope, obj pyObject) (string, map[string]string) {
if obj == nil || obj == None | else if cmd, ok := obj.(pyString); ok {
return strings.TrimSpace(string(cmd)), nil
}
cmds, ok := asDict(obj)
s.Assert(ok, "Unknown type for command [%s]", obj.Type())
// Have to convert all the keys too
m := make(map[string]string, len(cmds))
for k, v := range cmds {
if v != None {
sv, ok := v.(pyString)
s.Assert(ok, "Unknown type for command")
m[k] = strings.TrimSpace(string(sv))
}
}
return "", m
}
// populateTarget sets the assorted attributes on a build target.
func populateTarget(s *scope, t *core.BuildTarget, args []pyObject) {
if t.IsRemoteFile {
for _, url := range mustList(args[urlsBuildRuleArgIdx]) {
t.AddSource(core.URLLabel(url.(pyString)))
}
} else if t.IsTextFile {
t.FileContent = args[fileContentArgIdx].(pyString).String()
}
addMaybeNamed(s, "srcs", args[srcsBuildRuleArgIdx], t.AddSource, t.AddNamedSource, false, false)
addMaybeNamedOrString(s, "tools", args[toolsBuildRuleArgIdx], t.AddTool, t.AddNamedTool, true, true)
addMaybeNamed(s, "system_srcs", args[systemSrcsBuildRuleArgIdx], t.AddSource, nil, true, false)
addMaybeNamed(s, "data", args[dataBuildRuleArgIdx], t.AddDatum, t.AddNamedDatum, false, false)
addMaybeNamedOutput(s, "outs", args[outsBuildRuleArgIdx], t.AddOutput, t.AddNamedOutput, t, false)
addMaybeNamedOutput(s, "optional_outs", args[optionalOutsBuildRuleArgIdx], t.AddOptionalOutput, nil, t, true)
addDependencies(s, "deps", args[depsBuildRuleArgIdx], t, false, false)
addDependencies(s, "exported_deps", args[exportedDepsBuildRuleArgIdx], t, true, false)
addDependencies(s, "internal_deps", args[internalDepsBuildRuleArgIdx], t, false, true)
addStrings(s, "labels", args[labelsBuildRuleArgIdx], t.AddLabel)
addStrings(s, "hashes", args[hashesBuildRuleArgIdx], t.AddHash)
addStrings(s, "licences", args[licencesBuildRuleArgIdx], t.AddLicence)
addStrings(s, "requires", args[requiresBuildRuleArgIdx], t.AddRequire)
if vis, ok := asList(args[visibilityBuildRuleArgIdx]); ok && len(vis) != 0 {
if v, ok := vis[0].(pyString); ok && v == "PUBLIC" {
t.Visibility = core.WholeGraph
} else {
addStrings(s, "visibility", args[visibilityBuildRuleArgIdx], func(str string) {
t.Visibility = append(t.Visibility, parseVisibility(s, str))
})
}
}
addEntryPoints(s, args[entryPointsArgIdx], t)
addEnv(s, args[envArgIdx], t)
addMaybeNamedSecret(s, "secrets", args[secretsBuildRuleArgIdx], t.AddSecret, t.AddNamedSecret, t, true)
addProvides(s, "provides", args[providesBuildRuleArgIdx], t)
if f := callbackFunction(s, "pre_build", args[preBuildBuildRuleArgIdx], 1, "argument"); f != nil {
t.PreBuildFunction = &preBuildFunction{f: f, s: s}
}
if f := callbackFunction(s, "post_build", args[postBuildBuildRuleArgIdx], 2, "arguments"); f != nil {
t.PostBuildFunction = &postBuildFunction{f: f, s: s}
}
if t.IsTest() {
addMaybeNamedOrString(s, "test_tools", args[testToolsBuildRuleArgIdx], t.AddTestTool, t.AddNamedTestTool, true, true)
addMaybeNamedOutput(s, "test_outputs", args[testOutputsBuildRuleArgIdx], t.AddTestOutput, nil, t, false)
}
if t.Debug != nil {
addMaybeNamed(s, "debug_data", args[debugDataBuildRuleArgIdx], t.AddDebugDatum, t.AddDebugNamedDatum, false, false)
addMaybeNamedOrString(s, "debug_tools", args[debugToolsBuildRuleArgIdx], t.AddDebugTool, t.AddNamedDebugTool, true, true)
}
}
// addEntryPoints adds entry points to a target
func addEntryPoints(s *scope, arg pyObject, target *core.BuildTarget) {
entryPointsPy, ok := asDict(arg)
s.Assert(ok, "entry_points must be a dict")
entryPoints := make(map[string]string, len(entryPointsPy))
for name, entryPointPy := range entryPointsPy {
entryPoint, ok := entryPointPy.(pyString)
s.Assert(ok, "Values of entry_points must be strings, found %v at key %v", entryPointPy.Type(), name)
s.Assert(target.NamedOutputs(entryPoint.String()) == nil, "Entry points can't have the same name as a named output")
if target.IsFilegroup {
s.Assert(target.NamedSources[entryPoint.String()] == nil, "Entry points can't have the same name as a named source on a filegroup")
}
entryPoints[name] = string(entryPoint)
}
target.EntryPoints = entryPoints
}
// addEnv adds entry points to a target
func addEnv(s *scope, arg pyObject, target *core.BuildTarget) {
envPy, ok := asDict(arg)
s.Assert(ok, "env must be a dict")
env := make(map[string]string, len(envPy))
for name, val := range envPy {
v, ok := val.(pyString)
s.Assert(ok, "Values of env must be strings, found %v at key %v", val.Type(), name)
env[name] = string(v)
}
target.Env = env
}
// addMaybeNamed adds inputs to a target, possibly in named groups.
func addMaybeNamed(s *scope, name string, obj pyObject, anon func(core.BuildInput), named func(string, core.BuildInput), systemAllowed, tool bool) {
if obj == nil {
return
}
if l, ok := asList(obj); ok {
for _, li := | {
return "", nil
} | conditional_block |
tables.py | _keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = quant.m
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def create_stream_table_ui(
streams, true_state=False, time_point=0, orient="columns", precision=5
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later. Note: This function process each stream the same way
`create_stream_table_dataframe` does.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicated whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
precision: rounding the floating numbers to the give precision. Default
is 5 digits after the floating point.
Returns:
A pandas DataFrame containing the stream table data.
"""
# Variable Types:
class VariableTypes:
UNFIXED = "unfixed"
FIXED = "fixed"
PARAMETER = "parameter"
EXPRESSION = "expression"
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
# Identifying value's variable type
var_type = None
if isinstance(disp_dict[k][i], (_GeneralVarData, Var)):
if disp_dict[k][i].fixed:
var_type = VariableTypes.FIXED
else:
var_type = VariableTypes.UNFIXED
elif isinstance(disp_dict[k][i], Param):
var_type = VariableTypes.PARAMETER
elif isinstance(disp_dict[k][i], Expression):
var_type = VariableTypes.EXPRESSION
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = (
round(quant.m, precision),
var_type,
)
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def stream_table_dataframe_to_string(stream_table, **kwargs):
"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
def _get_state_from_port(port, time_point):
"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. If no components are assigned to the Port, raise a
ValueError. If the first component's parent block has no index, raise an
AttributeError. If different variables on the port appear to be connected
to different state blocks, raise a RuntimeError.
Args:
port (pyomo.network.Port): a port with variables derived from some
single StateBlock
time_point : point in the time domain at which to index StateBlock
(default = 0)
Returns:
(StateBlock-like) : an object containing all the components contained
in the port.
"""
vlist = list(port.iter_vars())
states = [v.parent_block().parent_component() for v in vlist]
if len(vlist) == 0:
raise ValueError(
f"No block could be retrieved from Port {port.name} "
f"because it contains no components."
)
# Check the number of indices of the parent property block. If its indexed
# both in space and time, keep the second, spatial index and throw out the
# first, temporal index. If that ordering is changed, this method will
# need to be changed as well.
try:
idx = vlist[0].parent_block().index()
except AttributeError as err:
raise AttributeError(
f"No block could be retrieved from Port {port.name} "
f"because block {vlist[0].parent_block().name} has no index."
) from err
# Assuming the time index is always first and the spatial indices are all
# the same
if isinstance(idx, tuple):
idx = (time_point, vlist[0].parent_block().index()[1:])
else:
idx = (time_point,)
# This method also assumes that ports with different spatial indices won't
# end up at the same port. Otherwise this check is insufficient.
if all(states[0] is s for s in states):
return states[0][idx]
raise RuntimeError(
f"No block could be retrieved from Port {port.name} "
f"because components are derived from multiple blocks."
)
def generate_table(blocks, attributes, heading=None, exception=True):
"""
Create a Pandas DataFrame that contains a list of user-defined attributes
from a set of Blocks.
Args:
blocks (dict): A dictionary with name keys and BlockData objects for
values. Any name can be associated with a block. Use an OrderedDict
to show the blocks in a specific order, otherwise the dataframe can
be sorted later.
attributes (list or tuple of strings): Attributes to report from a
Block, can be a Var, Param, or Expression. If an attribute doesn't
exist or doesn't have a valid value, it will be treated as missing
data.
heading (list or tuple of strings): A list of strings that will be used
as column headings. If None the attribute names will be used.
exception (bool): If True, raise exceptions related to invalid or
missing indexes. If false missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with different
indexing. (default is True)
Returns:
(DataFrame): A Pandas dataframe containing a data table
"""
if heading is None:
heading = attributes
st = DataFrame(columns=heading)
row = [None] * len(attributes) # not a big deal but save time on realloc
for key, s in blocks.items():
| for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, assume index supplied
try:
assert len(a) > 1
except AssertionError:
_log.error(f"An index must be supplided for attribute {a[0]}")
raise AssertionError(
f"An index must be supplided for attribute {a[0]}"
)
j = a[1:]
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None | conditional_block |
|
tables.py | of a translator block.
sb = _get_state_from_port(a.ports[1], time_point)
except: # pylint: disable=W0702
sb = _get_state_from_port(a.ports[0], time_point)
_stream_dict_add(sb, n, i)
elif isinstance(streams[n], Port):
sb = _get_state_from_port(streams[n], time_point)
_stream_dict_add(sb, n)
else:
# _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
try:
sb = streams[n][time_point]
except KeyError as err:
raise TypeError(
f"Either component type of stream argument {streams[n]} "
f"is unindexed or {time_point} is not a member of its "
f"indexing set."
) from err
_stream_dict_add(sb, n)
return stream_dict
def create_stream_table_dataframe(
streams, true_state=False, time_point=0, orient="columns"
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicated whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = quant.m
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def create_stream_table_ui(
streams, true_state=False, time_point=0, orient="columns", precision=5
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later. Note: This function process each stream the same way
`create_stream_table_dataframe` does.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicated whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
precision: rounding the floating numbers to the give precision. Default
is 5 digits after the floating point.
Returns:
A pandas DataFrame containing the stream table data.
"""
# Variable Types:
class VariableTypes:
UNFIXED = "unfixed"
FIXED = "fixed"
PARAMETER = "parameter"
EXPRESSION = "expression"
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
# Identifying value's variable type
var_type = None
if isinstance(disp_dict[k][i], (_GeneralVarData, Var)):
if disp_dict[k][i].fixed:
var_type = VariableTypes.FIXED
else:
var_type = VariableTypes.UNFIXED
elif isinstance(disp_dict[k][i], Param):
var_type = VariableTypes.PARAMETER
elif isinstance(disp_dict[k][i], Expression):
var_type = VariableTypes.EXPRESSION
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = (
round(quant.m, precision),
var_type,
)
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def stream_table_dataframe_to_string(stream_table, **kwargs):
"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
def _get_state_from_port(port, time_point):
"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. If no components are assigned to the Port, raise a
ValueError. If the first component's parent block has no index, raise an
AttributeError. If different variables on the port appear to be connected
to different state blocks, raise a RuntimeError.
Args:
port (pyomo.network.Port): a port with variables derived from some
single StateBlock
time_point : point in the time domain at which to index StateBlock
(default = 0)
Returns:
(StateBlock-like) : an object containing all the components contained
in the port.
"""
vlist = list(port.iter_vars())
states = [v.parent_block().parent_component() for v in vlist]
if len(vlist) == 0:
raise ValueError(
f"No block could be retrieved from Port {port.name} "
f"because it contains no components."
)
# Check the number of indices of the parent property block. If its indexed
# both in space and time, keep the second, spatial index and throw out the
# first, temporal index. If that ordering is changed, this method will
# need to be changed as well.
try:
idx = vlist[0].parent_block().index()
except AttributeError as err:
raise AttributeError(
f"No block could be retrieved from Port {port.name} "
f"because block {vlist[0].parent_block().name} has no index."
) from err
# Assuming the time index is always first and the spatial indices are all
# the same
if isinstance(idx, tuple):
idx = (time_point, vlist[0].parent_block().index()[1:])
else:
idx = (time_point,)
# This method also assumes that ports with different spatial indices won't
# end up at the same port. Otherwise this check is insufficient.
if all(states[0] is s for s in states):
return states[0][idx]
raise RuntimeError(
f"No block could be retrieved from Port {port.name} "
f"because components are derived from multiple blocks."
)
def | generate_table | identifier_name |
|
tables.py | str): Prepend a string to the arc name joined with a '.'.
This can be useful to prevent conflicting names when sub blocks
contain Arcs that have the same names when used in combination
with descend_into=False.
s (dict): Add streams to an existing stream dict.
Returns:
Dictionary with Arc names as keys and the Arcs as values.
"""
if s is None:
s = {}
for c in blk.component_objects(Arc, descend_into=descend_into):
key = c.getname()
if prepend is not None:
key = ".".join([prepend, key])
s[key] = c
if additional is not None:
s.update(additional)
if sort:
s = OrderedDict(sorted(s.items()))
return s
def stream_states_dict(streams, time_point=0):
"""
Method to create a dictionary of state block representing stream states.
This takes a dict with stream name keys and stream values.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
time_point : point in the time domain at which to generate stream table
(default = 0)
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_dict = OrderedDict()
def _stream_dict_add(sb, n, i=None):
"""add a line to the stream table"""
if i is None:
key = n
else:
key = "{}[{}]".format(n, i)
stream_dict[key] = sb
for n in streams.keys():
if isinstance(streams[n], Arc):
for i, a in streams[n].items():
try:
# if getting the StateBlock from the destination port
# fails for any reason try the source port. This could
# happen if a port does not have an associated
# StateBlock. For example a surrogate model may not
# use state blocks, unit models may handle physical
# properties without state blocks, or the port could
# be used to serve the purpose of a translator block.
sb = _get_state_from_port(a.ports[1], time_point)
except: # pylint: disable=W0702
sb = _get_state_from_port(a.ports[0], time_point)
_stream_dict_add(sb, n, i)
elif isinstance(streams[n], Port):
sb = _get_state_from_port(streams[n], time_point)
_stream_dict_add(sb, n)
else:
# _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
try:
sb = streams[n][time_point]
except KeyError as err:
raise TypeError(
f"Either component type of stream argument {streams[n]} "
f"is unindexed or {time_point} is not a member of its "
f"indexing set."
) from err
_stream_dict_add(sb, n)
return stream_dict
def create_stream_table_dataframe(
streams, true_state=False, time_point=0, orient="columns"
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicated whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = quant.m
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
| return DataFrame.from_dict(stream_attributes, orient=orient)
def create_stream_table_ui(
streams, true_state=False, time_point=0, orient="columns", precision=5
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later. Note: This function process each stream the same way
`create_stream_table_dataframe` does.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicated whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
precision: rounding the floating numbers to the give precision. Default
is 5 digits after the floating point.
Returns:
A pandas DataFrame containing the stream table data.
"""
# Variable Types:
class VariableTypes:
UNFIXED = "unfixed"
FIXED = "fixed"
PARAMETER = "parameter"
EXPRESSION = "expression"
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
# Identifying value's variable type
var_type = None
if isinstance(disp_dict[k][i], (_GeneralVarData, Var)):
if disp_dict[k][i].fixed:
var_type = VariableTypes.FIXED
else:
var_type = VariableTypes.UNFIXED
elif isinstance(disp_dict[k][i], Param):
var_type = VariableTypes.PARAMETER
elif isinstance(disp_dict[k][i], Expression):
var_type = VariableTypes.EXPRESSION
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = (
round(quant.m, precision),
var_type,
)
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def stream_table_dataframe_to_string(stream_table, **kwargs):
"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
def _get_state_from_port(port, time_point):
"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. | random_line_split |
|
tables.py | ): Prepend a string to the arc name joined with a '.'.
This can be useful to prevent conflicting names when sub blocks
contain Arcs that have the same names when used in combination
with descend_into=False.
s (dict): Add streams to an existing stream dict.
Returns:
Dictionary with Arc names as keys and the Arcs as values.
"""
if s is None:
s = {}
for c in blk.component_objects(Arc, descend_into=descend_into):
key = c.getname()
if prepend is not None:
key = ".".join([prepend, key])
s[key] = c
if additional is not None:
s.update(additional)
if sort:
s = OrderedDict(sorted(s.items()))
return s
def stream_states_dict(streams, time_point=0):
"""
Method to create a dictionary of state block representing stream states.
This takes a dict with stream name keys and stream values.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
time_point : point in the time domain at which to generate stream table
(default = 0)
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_dict = OrderedDict()
def _stream_dict_add(sb, n, i=None):
"""add a line to the stream table"""
if i is None:
key = n
else:
key = "{}[{}]".format(n, i)
stream_dict[key] = sb
for n in streams.keys():
if isinstance(streams[n], Arc):
for i, a in streams[n].items():
try:
# if getting the StateBlock from the destination port
# fails for any reason try the source port. This could
# happen if a port does not have an associated
# StateBlock. For example a surrogate model may not
# use state blocks, unit models may handle physical
# properties without state blocks, or the port could
# be used to serve the purpose of a translator block.
sb = _get_state_from_port(a.ports[1], time_point)
except: # pylint: disable=W0702
sb = _get_state_from_port(a.ports[0], time_point)
_stream_dict_add(sb, n, i)
elif isinstance(streams[n], Port):
sb = _get_state_from_port(streams[n], time_point)
_stream_dict_add(sb, n)
else:
# _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
try:
sb = streams[n][time_point]
except KeyError as err:
raise TypeError(
f"Either component type of stream argument {streams[n]} "
f"is unindexed or {time_point} is not a member of its "
f"indexing set."
) from err
_stream_dict_add(sb, n)
return stream_dict
def create_stream_table_dataframe(
streams, true_state=False, time_point=0, orient="columns"
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicated whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = quant.m
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def create_stream_table_ui(
streams, true_state=False, time_point=0, orient="columns", precision=5
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later. Note: This function process each stream the same way
`create_stream_table_dataframe` does.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicated whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
stream are displayed as rows.
precision: rounding the floating numbers to the give precision. Default
is 5 digits after the floating point.
Returns:
A pandas DataFrame containing the stream table data.
"""
# Variable Types:
class VariableTypes:
|
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for row, i in enumerate(disp_dict[k]):
stream_key = k if i is None else f"{k} {i}"
# Identifying value's variable type
var_type = None
if isinstance(disp_dict[k][i], (_GeneralVarData, Var)):
if disp_dict[k][i].fixed:
var_type = VariableTypes.FIXED
else:
var_type = VariableTypes.UNFIXED
elif isinstance(disp_dict[k][i], Param):
var_type = VariableTypes.PARAMETER
elif isinstance(disp_dict[k][i], Expression):
var_type = VariableTypes.EXPRESSION
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = (
round(quant.m, precision),
var_type,
)
if row == 0 or stream_key not in stream_attributes["Units"]:
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def stream_table_dataframe_to_string(stream_table, **kwargs):
"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
def _get_state_from_port(port, time_point):
"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. | UNFIXED = "unfixed"
FIXED = "fixed"
PARAMETER = "parameter"
EXPRESSION = "expression" | identifier_body |
signin.py | ['level']} {record['owner']} {record['message']}"
G.log_history.append(msg)
if G.mainwindow:
G.mainwindow.job.log_signal.emit(msg)
if G.loading:
try:
msg1 = json.loads(record['message'].replace("\'", "\""))
G.loading.msg.setText(msg1['ErrorMsg'])
except Exception as e:
G.loading.msg.setText(record['message'])
simnow_yd = dict(
brokerid="9999",
md_address="tcp://218.202.237.33:10112",
td_address="tcp://218.202.237.33:10102",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
simnow_24 = dict(
brokerid="9999",
md_address="tcp://180.168.146.187:10131",
td_address="tcp://180.168.146.187:10130",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
class SignInWidget(QWidget, Ui_SignIn):
def __init__(self):
| self.setTabOrder(self.remember_me, self.sign_in_btn)
self.icon.installEventFilter(self)
self.icon.setText('快速登录')
self.icon.setStyleSheet("""
QLabel{
image: url(:/menu/images/bee_temp_grey.png);
}
QLabel:hover{
color:#1B89CA;
border:1px solid #2B2B2B;
border-radius: 5px;
}
""")
#
self.sign_in_btn.clicked.connect(self.sign_in_slot)
self.sign_in_btn.setDisabled(True)
#
for i in self.__dict__.values():
if isinstance(i, QLineEdit):
i.setContextMenuPolicy(Qt.NoContextMenu) ######不允许右键产生子菜单
self.login_tab.currentChanged.connect(self.check_disable)
# 普通
self.userid_sim.currentTextChanged.connect(self.check_disable)
self.password_sim.textChanged.connect(self.check_disable)
#
self.userid.currentTextChanged.connect(self.check_disable)
self.password.textChanged.connect(self.check_disable)
self.brokerid.currentTextChanged.connect(self.check_disable)
self.auth_code.currentTextChanged.connect(self.check_disable)
self.appid.currentTextChanged.connect(self.check_disable)
self.td_address.currentTextChanged.connect(self.check_disable)
self.td_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.currentTextChanged.connect(self.check_disable)
# timer
self.timer = QTimer(self)
self.timer.timeout.connect(self.close_load)
self.load_remember()
def submask(self):
self.bmp
= QBitmap(self.size())
self.bmp.fill()
self.p = QPainter(self.bmp)
self.p.setPen(Qt.black)
self.p.setBrush(Qt.black)
self.p.drawRoundedRect(self.bmp.rect(), 10, 10)
self.setMask(self.bmp)
@Slot()
def check_disable(self):
if self.login_tab.currentIndex() == 0:
if self.userid_sim.currentText() and self.password_sim.text():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
if self.login_tab.currentIndex() == 1:
if self.userid.currentText() and \
self.password.text() and \
self.brokerid.currentText() and \
self.auth_code.currentText() and \
self.appid.currentText() and \
self.td_address.currentText() and \
self.md_address.currentText():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
def editTextChanged_slot(self):
td = self.td_address.currentText()
md = self.md_address.currentText()
k = 'tcp://'
if not md.startswith(k):
self.md_address.setCurrentText(k + md)
if not td.startswith(k):
self.td_address.setCurrentText(k + td)
def load_remember(self):
def get_account(path):
data = {}
with open(path, 'r')as f:
info = f.read()
if info:
try:
data = json.loads(info)
if not isinstance(info, dict):
raise Exception
except Exception:
pass
return data
path_list = os.listdir(desktop_path)
for i in path_list:
path = join_path(desktop_path, i, '.account.json')
if os.path.exists(path):
info = get_account(path)
self.userid.addItem(info.get('userid'))
self.brokerid.addItem(info.get('brokerid'))
self.auth_code.addItem(info.get('auth_code'))
self.appid.addItem(info.get('appid'))
self.td_address.addItem(info.get('td_address'))
self.md_address.addItem(info.get('md_address'))
self.interface_.addItem(info.get('interface'))
path = join_path(desktop_path, i, '.sim.json')
if os.path.exists(path):
info = get_account(path)
self.userid_sim.addItem(info.get('userid'))
self.password_sim.setText(info.get('password'))
self.remember_me_sim.setChecked(True)
def load_config(self):
for k, v in G.config.to_dict().items():
if v:
current_app.config.update({k: v})
def close_load(self):
self.loading.close()
self.timer.stop()
def sign_in(self, info):
bee_app = CtpBee(name=info.get("username"), import_name=__name__, refresh=True)
login_info = {
"CONNECT_INFO": info,
"INTERFACE": info.get('interface'),
"TD_FUNC": True,
"MD_FUNC": True,
}
bee_app.config.from_mapping(login_info)
bee_app.start()
# loading
self.loading = LoadingDialog()
G.loading = self.loading
self.timer.start(2000) # ms
self.loading.msg.setText("正在连接服务器...")
self.loading.exec_()
if bee_app and \
bee_app.trader and \
bee_app.td_login_status:
##
G.signin_success(info['userid'])
##
self.load_config()
###
mainwindow = MainWindow()
mainwindow.sign_in_success()
mainwindow.show()
self.close()
return True
else:
return False
def sign_in_slot(self):
if self.login_tab.currentIndex() == 0:
self.common_sign_in()
elif self.login_tab.currentIndex() == 1:
self.detailed_sign_in()
def common_sign_in(self):
info = dict(
userid=self.userid_sim.currentText(),
password=self.password_sim.text(),
interface=self.interface_sim.currentText(),
)
which_ = self.other.currentText()
if which_ == 'simnow24小时':
info.update(simnow_24)
elif which_ == 'simnow移动':
info.update(simnow_yd)
if self.sign_in(info):
if self.remember_me_sim.isChecked():
account_path = os.path.join(G.user_path, ".sim.json")
with open(account_path, 'w') as f:
json.dump(info, f)
else:
if which_ == 'simnow24小时':
msg = 'simnow移动'
info.update(simnow_yd)
else:
msg = 'simnow24小时'
info.update(simnow_24)
reply = QMessageBox.question(self, '登录出现错误', "是否尝试" + msg,
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.interface_sim.setCurrentText(msg)
if not self.sign_in(info):
QMessageBox.information(self, "提示", "登录失败")
def detailed_sign_in(self):
info = dict(
userid=self.userid.currentText(),
password=self.password.text(),
brokerid=self.brokerid.currentText(),
md_address=self.md_address.currentText(),
td_address=self.td_address.currentText(),
product_info="",
appid=self.appid.currentText(),
auth_code=self.auth_code.currentText(),
interface=self.interface_.currentText(),
)
| super(SignInWidget, self).__init__()
self.setupUi(self)
self.setWindowTitle("ctpbee客户端")
# self.setWindowFlag(Qt.FramelessWindowHint) # 去边框
self.setWindowFlags(Qt.WindowCloseButtonHint)
self.setStyleSheet(qss)
# tab
self.setTabOrder(self.userid_sim, self.password_sim)
self.setTabOrder(self.password_sim, self.interface_sim)
self.setTabOrder(self.interface_sim, self.other)
self.setTabOrder(self.other, self.remember_me)
#
self.setTabOrder(self.userid, self.password)
self.setTabOrder(self.password, self.brokerid)
self.setTabOrder(self.brokerid, self.auth_code)
self.setTabOrder(self.auth_code, self.appid)
self.setTabOrder(self.appid, self.td_address)
self.setTabOrder(self.td_address, self.md_address)
self.setTabOrder(self.md_address, self.interface_)
self.setTabOrder(self.interface_, self.remember_me)
| identifier_body |
signin.py | ['level']} {record['owner']} {record['message']}"
G.log_history.append(msg)
if G.mainwindow:
G.mainwindow.job.log_signal.emit(msg)
if G.loading:
try:
msg1 = json.loads(record['message'].replace("\'", "\""))
G.loading.msg.setText(msg1['ErrorMsg'])
except Exception as e:
G.loading.msg.setText(record['message'])
simnow_yd = dict(
brokerid="9999",
md_address="tcp://218.202.237.33:10112",
td_address="tcp://218.202.237.33:10102",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
simnow_24 = dict(
brokerid="9999",
md_address="tcp://180.168.146.187:10131",
td_address="tcp://180.168.146.187:10130",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
class SignInWidget(QWidget, Ui_SignIn):
def __init__(self):
super(SignInWidget, self).__init__()
self.setupUi(self)
self.setWindowTitle("ctpbee客户端")
# self.setWindowFlag(Qt.FramelessWindowHint) # 去边框
self.setWindowFlags(Qt.WindowCloseButtonHint)
self.setStyleSheet(qss)
# tab
self.setTabOrder(self.userid_sim, self.password_sim)
self.setTabOrder(self.password_sim, self.interface_sim)
self.setTabOrder(self.interface_sim, self.other)
self.setTabOrder(self.other, self.remember_me)
#
self.setTabOrder(self.userid, self.password)
self.setTabOrder(self.password, self.brokerid)
self.setTabOrder(self.brokerid, self.auth_code)
self.setTabOrder(self.auth_code, self.appid)
self.setTabOrder(self.appid, self.td_address)
self.setTabOrder(self.td_address, self.md_address)
self.setTabOrder(self.md_address, self.interface_)
self.setTabOrder(self.interface_, self.remember_me)
self.setTabOrder(self.remember_me, self.sign_in_btn)
self.icon.installEventFilter(self)
self.icon.setText('快速登录')
self.icon.setStyleSheet("""
QLabel{
image: url(:/menu/images/bee_temp_grey.png);
}
QLabel:hover{
color:#1B89CA;
border:1px solid #2B2B2B;
border-radius: 5px;
}
""")
#
self.sign_in_btn.clicked.connect(self.sign_in_slot)
self.sign_in_btn.setDisabled(True)
#
for i in self.__dict__.values():
if isinstance(i, QLineEdit):
i.setContextMenuPolicy(Qt.NoContextMenu) ######不允许右键产生子菜单
self.login_tab.currentChanged.connect(self.check_disable)
# 普通
self.userid_sim.currentTextChanged.connect(self.check_disable)
self.password_sim.textChanged.connect(self.check_disable)
#
self.userid.currentTextChanged.connect(self.check_disable)
self.password.textChanged.connect(self.check_disable)
self.brokerid.currentTextChanged.connect(self.check_disable)
self.auth_code.currentTextChanged.connect(self.check_disable)
self.appid.currentTextChanged.connect(self.check_disable)
self.td_address.currentTextChanged.connect(self.check_disable)
self.td_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.currentTextChanged.connect(self.check_disable)
# timer
self.timer = QTimer(self)
self.timer.timeout.connect(self.close_load)
self.load_remember()
def submask(self):
self.bmp = QBitmap(self.size())
self.bmp.fill()
self.p = QPainter(self.bmp)
self.p.setPen(Qt.black)
self.p.setBrush(Qt.black)
self.p.drawRoundedRect(self.bmp.rect(), 10, 10)
self.setMask(self.bmp)
@Slot()
def check_disable(self):
if self.login_tab.currentIndex() == 0:
if self.userid_sim.currentText() and self.password_sim.text():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
if self.login_tab.currentIndex() == 1:
if self.userid.currentText() and \
self.password.text() and \
self.brokerid.currentText() and \
self.auth_code.currentText() and \
self.appid.currentText() and \
self.td_address.currentText() and \
self.md_address.currentText():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
def editTextChanged_slot(self):
td = self.td_address.currentText()
md = self.md_address.currentText()
k = 'tcp://'
if not md.startswith(k):
self.md_address.setCurrentText(k + md)
if not td.startswith(k):
self.td_address.setCurrentText(k + td)
def load_remember(self):
def get_account(path):
data = {}
with open(path, 'r')as f:
info = f.read()
if info:
try:
data = json.loads(info)
if not isinstance(info, dict):
raise Exception
except Exception:
pass
return data
path_list = os.listdir(desktop_path)
for i in path_list:
path = join_path(desktop_path, i, '.account.json')
if os.path.exists(path):
info = get_account(path)
self.userid.addItem(info.get('userid'))
self.brokerid.addItem(info.get('brokerid'))
self.auth_code.addItem(info.get('auth_code'))
self.appid.addItem(info.get('appid'))
self.td_address.addItem(info.get('td_address'))
self.md_address.addItem(info.get('md_address'))
self.interface_.addItem(info.get('interface'))
path = join_path(desktop_path, i, '.sim.json')
if os.path.exists(path):
info = get_account(path)
self.userid_sim.addItem(info.get('userid'))
self.password_sim.setText(info.get('password'))
self.remember_me_sim.setChecked(True)
def load_config(self):
for k, v in G.config.to_dict().items():
if v:
current_app.config.update({k: v})
def close_load(self):
self.loading.close()
self.timer.stop()
def sign_in(self, info):
bee_app = CtpBee(name=info.get("username"), import_name=__name__, refresh=True)
login_info = {
"CONNECT_INFO": info,
"INTERFACE": info.get('interface'),
"TD_FUNC": True,
"MD_FUNC": True,
}
bee_app.config.from_mapping(login_info)
bee_app.start()
# loading
self.loading = LoadingDialog()
G.loading = self.loading
self.timer.start(2000) # ms
self.loading.msg.setText("正在连接服务器...")
self.loading.exec_()
if bee_app and \
bee_app.trader and \
bee_app.td_login_status:
##
G.signin_success(info['userid'])
##
self.load_config()
###
mainwindow = MainWindow()
mainwindow.sign_in_success()
mainwindow.show()
self.close()
return True
else:
return False
def sign_in_slot(self):
if self.login_tab.currentInde | self.common_sign_in()
elif self.login_tab.currentIndex() == 1:
self.detailed_sign_in()
def common_sign_in(self):
info = dict(
userid=self.userid_sim.currentText(),
password=self.password_sim.text(),
interface=self.interface_sim.currentText(),
)
which_ = self.other.currentText()
if which_ == 'simnow24小时':
info.update(simnow_24)
elif which_ == 'simnow移动':
info.update(simnow_yd)
if self.sign_in(info):
if self.remember_me_sim.isChecked():
account_path = os.path.join(G.user_path, ".sim.json")
with open(account_path, 'w') as f:
json.dump(info, f)
else:
if which_ == 'simnow24小时':
msg = 'simnow移动'
info.update(simnow_yd)
else:
msg = 'simnow24小时'
info.update(simnow_24)
reply = QMessageBox.question(self, '登录出现错误', "是否尝试" + msg,
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.interface_sim.setCurrentText(msg)
if not self.sign_in(info):
QMessageBox.information(self, "提示", "登录失败")
def detailed_sign_in(self):
info = dict(
userid=self.userid.currentText(),
password=self.password.text(),
brokerid=self.brokerid.currentText(),
md_address=self.md_address.currentText(),
td_address=self.td_address.currentText(),
product_info="",
appid=self.appid.currentText(),
auth_code=self.auth_code.currentText(),
interface=self.interface_.currentText(),
| x() == 0:
| identifier_name |
signin.py | ['level']} {record['owner']} {record['message']}"
G.log_history.append(msg)
if G.mainwindow:
G.mainwindow.job.log_signal.emit(msg)
if G.loading:
try:
msg1 = json.loads(record['message'].replace("\'", "\""))
G.loading.msg.setText(msg1['ErrorMsg'])
except Exception as e:
G.loading.msg.setText(record['message'])
simnow_yd = dict(
brokerid="9999",
md_address="tcp://218.202.237.33:10112",
td_address="tcp://218.202.237.33:10102",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
simnow_24 = dict(
brokerid="9999",
md_address="tcp://180.168.146.187:10131",
td_address="tcp://180.168.146.187:10130",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
class SignInWidget(QWidget, Ui_SignIn):
def __init__(self):
super(SignInWidget, self).__init__()
self.setupUi(self)
self.setWindowTitle("ctpbee客户端")
# self.setWindowFlag(Qt.FramelessWindowHint) # 去边框
self.setWindowFlags(Qt.WindowCloseButtonHint)
self.setStyleSheet(qss)
# tab
self.setTabOrder(self.userid_sim, self.password_sim)
self.setTabOrder(self.password_sim, self.interface_sim)
self.setTabOrder(self.interface_sim, self.other)
self.setTabOrder(self.other, self.remember_me)
#
self.setTabOrder(self.userid, self.password)
self.setTabOrder(self.password, self.brokerid)
self.setTabOrder(self.brokerid, self.auth_code)
self.setTabOrder(self.auth_code, self.appid)
self.setTabOrder(self.appid, self.td_address)
self.setTabOrder(self.td_address, self.md_address)
self.setTabOrder(self.md_address, self.interface_)
self.setTabOrder(self.interface_, self.remember_me)
self.setTabOrder(self.remember_me, self.sign_in_btn)
self.icon.installEventFilter(self)
self.icon.setText('快速登录')
self.icon.setStyleSheet("""
QLabel{
image: url(:/menu/images/bee_temp_grey.png);
}
QLabel:hover{
color:#1B89CA;
border:1px solid #2B2B2B;
border-radius: 5px;
}
""")
#
self.sign_in_btn.clicked.connect(self.sign_in_slot)
self.sign_in_btn.setDisabled(True)
#
for i in self.__dict__.values():
if isinstance(i, QLineEdit):
i.setContextMenuPolicy(Qt.NoContextMenu) ######不允许右键产生子菜单
self.login_tab.currentChanged.connect(self.check_disable)
# 普通
self.userid_sim.currentTextChanged.connect(self.check_disable)
self.password_sim.textChanged.connect(self.check_disable)
#
self.userid.currentTextChanged.connect(self.check_disable)
self.password.textChanged.connect(self.check_disable)
self.brokerid.currentTextChanged.connect(self.check_disable)
self.auth_code.currentTextChanged.connect(self.check_disable)
self.appid.currentTextChanged.connect(self.check_disable)
self.td_address.currentTextChanged.connect(self.check_disable)
self.td_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.currentTextChanged.connect(self.check_disable)
# timer
self.timer = QTimer(self)
self.timer.timeout.connect(self.close_load)
self.load_remember()
def submask(self):
self.bmp = QBitmap(self.size())
self.bmp.fill()
self.p = QPainter(self.bmp)
self.p.setPen(Qt.black)
self.p.setBrush(Qt.black)
self.p.drawRoundedRect(self.bmp.rect(), 10, 10)
self.setMask(self.bmp)
@Slot()
def check_disable(self):
if self.login_tab.currentIndex() == 0:
if self.userid_sim.currentText() and self.password_sim.text():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
if self.login_tab.currentIndex() == 1:
if self.userid.currentText() and \
self.password.text() and \
self.brokerid.currentText() and \
self.auth_code.currentText() and \
self.appid.currentText() and \
self.td_address.currentText() and \
self.md_address.currentText():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
def editTextChanged_slot(self):
td = self.td_address.currentText()
md = self.md_address.currentText()
k = 'tcp://'
if not md.startswith(k):
self.md_address.setCurrentText(k + md)
if not td.startswith(k):
self.td_address.setCurrentText(k + td)
def load_remember(self):
def get_account(path):
data = {}
with open(path, 'r')as f:
info = f.read()
if info:
try:
data = json.loads(info)
if not isinstance(info, dict):
raise Exception
except Exception:
pass
return data
path_list = os.listdir(desktop_path)
for i in path_list:
path = join_path(desktop_path, i, '.account.json')
if os.path.exists(path):
info = get_account(path)
self.userid.addItem(info.get('userid'))
self.brokerid.addItem(info.get('brokerid'))
self.auth_code.addItem(info.get('auth_code'))
self.appid.addItem(info.get('appid'))
self.td_address.addItem(info.get('td_address'))
self.md_address.addItem(info.get('md_address'))
self.interface_.addItem(info.get('interface'))
path = join_path(desktop_path, i, '.sim.json')
if os.path.exists(path):
info = get_account(path)
self.userid_sim.addItem(info.get('userid'))
self.password_sim.setText(info.get('password'))
self.remember_me_sim.setChecked(True)
def load_config(self):
for k, v in G.config.to_dict().items():
if v:
current_app.config.update({k: v})
def close_load(self):
self.loading.close()
self.timer.stop()
def sign_in(self, info):
bee_app = CtpBee(name=info.get("username"), import_name=__name__, refresh=True)
login_info = {
"CONNECT_INFO": info,
"INTERFACE": info.get('interface'),
"TD_FUNC": True,
"MD_FUNC": True,
}
bee_app.config.from_mapping(login_info)
bee_app.start()
# loading
self.loading = LoadingDialog()
G.loading = self.loading
self.timer.start(2000) # ms
self.loading.msg.setText("正在连接服务器...")
self.loading.exec_()
if bee_app and \
bee_app.trader and \
bee_app.td_login_status:
##
G.signin_success(info['userid'])
##
self.load_config()
###
mainwindow = MainWindow()
mainwindow.sign_in_success()
mainwindow.show()
self.close()
return True
else:
return False
def sign_in_slot(self):
if self.login_tab.currentIndex() == 0:
self.common_sign_in()
elif self.login_tab.current | self.detailed_sign_in()
def common_sign_in(self):
info = dict(
userid=self.userid_sim.currentText(),
password=self.password_sim.text(),
interface=self.interface_sim.currentText(),
)
which_ = self.other.currentText()
if which_ == 'simnow24小时':
info.update(simnow_24)
elif which_ == 'simnow移动':
info.update(simnow_yd)
if self.sign_in(info):
if self.remember_me_sim.isChecked():
account_path = os.path.join(G.user_path, ".sim.json")
with open(account_path, 'w') as f:
json.dump(info, f)
else:
if which_ == 'simnow24小时':
msg = 'simnow移动'
info.update(simnow_yd)
else:
msg = 'simnow24小时'
info.update(simnow_24)
reply = QMessageBox.question(self, '登录出现错误', "是否尝试" + msg,
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.interface_sim.setCurrentText(msg)
if not self.sign_in(info):
QMessageBox.information(self, "提示", "登录失败")
def detailed_sign_in(self):
info = dict(
userid=self.userid.currentText(),
password=self.password.text(),
brokerid=self.brokerid.currentText(),
md_address=self.md_address.currentText(),
td_address=self.td_address.currentText(),
product_info="",
appid=self.appid.currentText(),
auth_code=self.auth_code.currentText(),
interface=self.interface_.currentText(),
)
| Index() == 1:
| conditional_block |
signin.py | record['level']} {record['owner']} {record['message']}"
G.log_history.append(msg)
if G.mainwindow:
G.mainwindow.job.log_signal.emit(msg)
if G.loading:
try:
msg1 = json.loads(record['message'].replace("\'", "\""))
G.loading.msg.setText(msg1['ErrorMsg'])
except Exception as e:
G.loading.msg.setText(record['message'])
simnow_yd = dict(
brokerid="9999",
md_address="tcp://218.202.237.33:10112",
td_address="tcp://218.202.237.33:10102",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
| md_address="tcp://180.168.146.187:10131",
td_address="tcp://180.168.146.187:10130",
product_info="",
appid="simnow_client_test",
auth_code="0000000000000000",
)
class SignInWidget(QWidget, Ui_SignIn):
def __init__(self):
super(SignInWidget, self).__init__()
self.setupUi(self)
self.setWindowTitle("ctpbee客户端")
# self.setWindowFlag(Qt.FramelessWindowHint) # 去边框
self.setWindowFlags(Qt.WindowCloseButtonHint)
self.setStyleSheet(qss)
# tab
self.setTabOrder(self.userid_sim, self.password_sim)
self.setTabOrder(self.password_sim, self.interface_sim)
self.setTabOrder(self.interface_sim, self.other)
self.setTabOrder(self.other, self.remember_me)
#
self.setTabOrder(self.userid, self.password)
self.setTabOrder(self.password, self.brokerid)
self.setTabOrder(self.brokerid, self.auth_code)
self.setTabOrder(self.auth_code, self.appid)
self.setTabOrder(self.appid, self.td_address)
self.setTabOrder(self.td_address, self.md_address)
self.setTabOrder(self.md_address, self.interface_)
self.setTabOrder(self.interface_, self.remember_me)
self.setTabOrder(self.remember_me, self.sign_in_btn)
self.icon.installEventFilter(self)
self.icon.setText('快速登录')
self.icon.setStyleSheet("""
QLabel{
image: url(:/menu/images/bee_temp_grey.png);
}
QLabel:hover{
color:#1B89CA;
border:1px solid #2B2B2B;
border-radius: 5px;
}
""")
#
self.sign_in_btn.clicked.connect(self.sign_in_slot)
self.sign_in_btn.setDisabled(True)
#
for i in self.__dict__.values():
if isinstance(i, QLineEdit):
i.setContextMenuPolicy(Qt.NoContextMenu) ######不允许右键产生子菜单
self.login_tab.currentChanged.connect(self.check_disable)
# 普通
self.userid_sim.currentTextChanged.connect(self.check_disable)
self.password_sim.textChanged.connect(self.check_disable)
#
self.userid.currentTextChanged.connect(self.check_disable)
self.password.textChanged.connect(self.check_disable)
self.brokerid.currentTextChanged.connect(self.check_disable)
self.auth_code.currentTextChanged.connect(self.check_disable)
self.appid.currentTextChanged.connect(self.check_disable)
self.td_address.currentTextChanged.connect(self.check_disable)
self.td_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.editTextChanged.connect(self.editTextChanged_slot)
self.md_address.currentTextChanged.connect(self.check_disable)
# timer
self.timer = QTimer(self)
self.timer.timeout.connect(self.close_load)
self.load_remember()
def submask(self):
self.bmp = QBitmap(self.size())
self.bmp.fill()
self.p = QPainter(self.bmp)
self.p.setPen(Qt.black)
self.p.setBrush(Qt.black)
self.p.drawRoundedRect(self.bmp.rect(), 10, 10)
self.setMask(self.bmp)
@Slot()
def check_disable(self):
if self.login_tab.currentIndex() == 0:
if self.userid_sim.currentText() and self.password_sim.text():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
if self.login_tab.currentIndex() == 1:
if self.userid.currentText() and \
self.password.text() and \
self.brokerid.currentText() and \
self.auth_code.currentText() and \
self.appid.currentText() and \
self.td_address.currentText() and \
self.md_address.currentText():
self.sign_in_btn.setEnabled(True)
else:
self.sign_in_btn.setDisabled(True)
def editTextChanged_slot(self):
td = self.td_address.currentText()
md = self.md_address.currentText()
k = 'tcp://'
if not md.startswith(k):
self.md_address.setCurrentText(k + md)
if not td.startswith(k):
self.td_address.setCurrentText(k + td)
def load_remember(self):
def get_account(path):
data = {}
with open(path, 'r')as f:
info = f.read()
if info:
try:
data = json.loads(info)
if not isinstance(info, dict):
raise Exception
except Exception:
pass
return data
path_list = os.listdir(desktop_path)
for i in path_list:
path = join_path(desktop_path, i, '.account.json')
if os.path.exists(path):
info = get_account(path)
self.userid.addItem(info.get('userid'))
self.brokerid.addItem(info.get('brokerid'))
self.auth_code.addItem(info.get('auth_code'))
self.appid.addItem(info.get('appid'))
self.td_address.addItem(info.get('td_address'))
self.md_address.addItem(info.get('md_address'))
self.interface_.addItem(info.get('interface'))
path = join_path(desktop_path, i, '.sim.json')
if os.path.exists(path):
info = get_account(path)
self.userid_sim.addItem(info.get('userid'))
self.password_sim.setText(info.get('password'))
self.remember_me_sim.setChecked(True)
def load_config(self):
for k, v in G.config.to_dict().items():
if v:
current_app.config.update({k: v})
def close_load(self):
self.loading.close()
self.timer.stop()
def sign_in(self, info):
bee_app = CtpBee(name=info.get("username"), import_name=__name__, refresh=True)
login_info = {
"CONNECT_INFO": info,
"INTERFACE": info.get('interface'),
"TD_FUNC": True,
"MD_FUNC": True,
}
bee_app.config.from_mapping(login_info)
bee_app.start()
# loading
self.loading = LoadingDialog()
G.loading = self.loading
self.timer.start(2000) # ms
self.loading.msg.setText("正在连接服务器...")
self.loading.exec_()
if bee_app and \
bee_app.trader and \
bee_app.td_login_status:
##
G.signin_success(info['userid'])
##
self.load_config()
###
mainwindow = MainWindow()
mainwindow.sign_in_success()
mainwindow.show()
self.close()
return True
else:
return False
def sign_in_slot(self):
if self.login_tab.currentIndex() == 0:
self.common_sign_in()
elif self.login_tab.currentIndex() == 1:
self.detailed_sign_in()
def common_sign_in(self):
info = dict(
userid=self.userid_sim.currentText(),
password=self.password_sim.text(),
interface=self.interface_sim.currentText(),
)
which_ = self.other.currentText()
if which_ == 'simnow24小时':
info.update(simnow_24)
elif which_ == 'simnow移动':
info.update(simnow_yd)
if self.sign_in(info):
if self.remember_me_sim.isChecked():
account_path = os.path.join(G.user_path, ".sim.json")
with open(account_path, 'w') as f:
json.dump(info, f)
else:
if which_ == 'simnow24小时':
msg = 'simnow移动'
info.update(simnow_yd)
else:
msg = 'simnow24小时'
info.update(simnow_24)
reply = QMessageBox.question(self, '登录出现错误', "是否尝试" + msg,
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.interface_sim.setCurrentText(msg)
if not self.sign_in(info):
QMessageBox.information(self, "提示", "登录失败")
def detailed_sign_in(self):
info = dict(
userid=self.userid.currentText(),
password=self.password.text(),
brokerid=self.brokerid.currentText(),
md_address=self.md_address.currentText(),
td_address=self.td_address.currentText(),
product_info="",
appid=self.appid.currentText(),
auth_code=self.auth_code.currentText(),
interface=self.interface_.currentText(),
| simnow_24 = dict(
brokerid="9999",
| random_line_split |
context.rs | //!The [`Context`][context] contains all the input data for the request
//!handlers, as well as some utilities. This is where request data, like
//!headers, client address and the request body can be retrieved from and it
//!can safely be picked apart, since its ownership is transferred to the
//!handler.
//!
//!##Accessing Headers
//!
//!The headers are stored in the `headers` field. See the [`Headers`][headers]
//!struct for more information about how to access them.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::header::UserAgent;
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(&UserAgent(ref user_agent)) = context.headers.get() {
//! response.send(format!("got user agent string \"{}\"", user_agent));
//! } else {
//! response.send("no user agent string provided");
//! }
//!}
//!```
//!
//!##Path Variables
//!
//!A router may collect variable data from paths (for example `id` in
//!`/products/:id`). The values from these variables can be accessed through
//!the `variables` field.
//!
//!```
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! if let Some(id) = context.variables.get("id") {
//! response.send(format!("asking for product with id \"{}\"", id));
//! } else {
//! //This will usually not happen, unless the handler is also
//! //assigned to a path without the `id` variable
//! response.send("no id provided");
//! }
//!}
//!```
//!
//!##Other URL Parts
//!
//! * Query variables (`http://example.com?a=b&c=d`) can be found in the
//!`query` field and they are accessed in exactly the same fashion as path
//!variables are used.
//!
//! * The fragment (`http://example.com#foo`) is also parsed and can be
//!accessed through `fragment` as an optional `String`.
//!
//!##Logging
//!
//!Rustful has a built in logging infrastructure and it is made available to
//!handlers through the `log` field. This provides logging and error reporting
//!in a unified and more controlled fashion than what panics and `println!`
//!gives. See the [`log`][log] module for more information about the standard
//!alternatives.
//!
//!```
//!# fn something_that_may_fail() -> Result<&'static str, &'static str> { Ok("yo") }
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! match something_that_may_fail() {
//! Ok(res) => response.send(res),
//! Err(e) => {
//! context.log.error(&format!("it failed! {}", e));
//! response.set_status(InternalServerError);
//! }
//! }
//!}
//!```
//!
//!##Global Data
//!
//!There is also infrastructure for globally accessible data, that can be
//!accessed through the `global` field. This is meant to provide a place for
//!things like database connections or cached data that should be available to
//!all handlers. The storage space itself is immutable when the server has
//!started, so the only way to change it is through some kind of inner
//!mutability.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! if let Some(some_wise_words) = context.global.get::<&str>() {
//! response.send(format!("food for thought: {}", some_wise_words));
//! } else {
//! context.log.error("there should be a string literal in `global`");
//! response.set_status(InternalServerError);
//! }
//!}
//!```
//!
//!##Request Body
//!
//!The body will not be read in advance, unlike the other parts of the
//!request. It is instead available as a `BodyReader` in the field `body`,
//!through which it can be read and parsed as various data formats, like JSON
//!and query strings. The documentation for [`BodyReader`][body_reader] gives
//!more examples.
//!
//!```
//!use std::io::{BufReader, BufRead};
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! let mut numbered_lines = BufReader::new(context.body).lines().enumerate();
//! let mut writer = response.into_chunked();
//!
//! while let Some((line_no, Ok(line))) = numbered_lines.next() {
//! writer.send(format!("{}: {}", line_no + 1, line));
//! }
//!}
//!```
//!
//![context]: struct.Context.html
//![headers]: ../header/struct.Headers.html
//![log]: ../log/index.html
//![body_reader]: struct.BodyReader.html
use std::collections::HashMap;
use std::io::{self, Read};
use std::net::SocketAddr;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::json;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::Decodable;
use hyper::http::h1::HttpReader;
use hyper::net::NetworkStream;
use hyper::buffer::BufReader;
#[cfg(feature = "multipart")]
use multipart::server::{HttpRequest, Multipart};
use utils;
use HttpVersion;
use Method;
use header::Headers;
use log::Log;
use Global;
///A container for handler input, like request data and utilities.
pub struct Context<'a, 'b: 'a, 's> {
///Headers from the HTTP request.
pub headers: Headers,
///The HTTP version used in the request.
pub http_version: HttpVersion,
///The client address
pub address: SocketAddr,
///The HTTP method.
pub method: Method,
///The requested path.
pub path: String,
///Hypermedia from the current endpoint.
pub hypermedia: Hypermedia<'s>,
///Route variables.
pub variables: HashMap<String, String>,
///Query variables from the path.
pub query: HashMap<String, String>,
///The fragment part of the URL (after #), if provided.
pub fragment: Option<String>,
///Log for notes, errors and warnings.
pub log: &'s (Log + 's),
///Globally accessible data.
pub global: &'s Global,
///A reader for the request body.
pub body: BodyReader<'a, 'b>,
}
///A reader for a request body.
pub struct BodyReader<'a, 'b: 'a> {
reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>,
#[cfg(feature = "multipart")]
multipart_boundary: Option<String>
}
#[cfg(feature = "multipart")]
impl<'a, 'b> BodyReader<'a, 'b> {
///Try to create a `multipart/form-data` reader from the request body.
///
///```
///# extern crate rustful;
///# extern crate multipart;
///use std::fmt::Write;
///use rustful::{Context, Response};
///use rustful::StatusCode::BadRequest;
///use multipart::server::MultipartData;
///
///fn my_handler(mut context: Context, mut response: Response) {
/// if let Some(mut multipart) = context.body.as_multipart() {
/// let mut result = String::new();
///
/// //Iterate over the multipart entries and print info about them in `result`
/// multipart.foreach_entry(|entry| match entry.data {
/// MultipartData::Text(text) => {
/// //Found data from a text field
/// writeln!(&mut result, "{}: '{}'", entry.name, text);
/// },
/// MultipartData::File(file) => {
/// //Found an uploaded file
/// if let Some(file_name) = file.filename() {
/// writeln!(&mut result, "{}: a file called '{}'", entry.name, file_name);
/// } else {
/// writeln!(&mut result, "{}: a nameless file", entry.name);
/// }
/// }
/// });
///
/// response.send(result);
/// } else {
/// //We expected it to be a valid `multipart/form-data` request, but it was not
/// response.set_status(BadRequest);
/// }
///}
///# fn main() {}
///```
pub fn as_multipart<'r>(&'r mut self) -> Option<Multipart<MultipartRequest<'r, 'a, 'b>>> {
let reader = &mut self.reader;
self.multipart_boundary.as_ref().and_then(move |boundary|
Multipart::from_request(MultipartRequest {
boundary: boundary,
reader: reader
}).ok()
)
}
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, headers: &Headers) -> BodyReader<'a, 'b> {
use header::ContentType;
use mime::{Mime, TopLevel, SubLevel, Attr, Value};
| //!Handler context and request body reading extensions.
//!
//!#Context
//! | random_line_split |
|
context.rs | Result<&'static str, &'static str> { Ok("yo") }
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! match something_that_may_fail() {
//! Ok(res) => response.send(res),
//! Err(e) => {
//! context.log.error(&format!("it failed! {}", e));
//! response.set_status(InternalServerError);
//! }
//! }
//!}
//!```
//!
//!##Global Data
//!
//!There is also infrastructure for globally accessible data, that can be
//!accessed through the `global` field. This is meant to provide a place for
//!things like database connections or cached data that should be available to
//!all handlers. The storage space itself is immutable when the server has
//!started, so the only way to change it is through some kind of inner
//!mutability.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! if let Some(some_wise_words) = context.global.get::<&str>() {
//! response.send(format!("food for thought: {}", some_wise_words));
//! } else {
//! context.log.error("there should be a string literal in `global`");
//! response.set_status(InternalServerError);
//! }
//!}
//!```
//!
//!##Request Body
//!
//!The body will not be read in advance, unlike the other parts of the
//!request. It is instead available as a `BodyReader` in the field `body`,
//!through which it can be read and parsed as various data formats, like JSON
//!and query strings. The documentation for [`BodyReader`][body_reader] gives
//!more examples.
//!
//!```
//!use std::io::{BufReader, BufRead};
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! let mut numbered_lines = BufReader::new(context.body).lines().enumerate();
//! let mut writer = response.into_chunked();
//!
//! while let Some((line_no, Ok(line))) = numbered_lines.next() {
//! writer.send(format!("{}: {}", line_no + 1, line));
//! }
//!}
//!```
//!
//![context]: struct.Context.html
//![headers]: ../header/struct.Headers.html
//![log]: ../log/index.html
//![body_reader]: struct.BodyReader.html
use std::collections::HashMap;
use std::io::{self, Read};
use std::net::SocketAddr;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::json;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::Decodable;
use hyper::http::h1::HttpReader;
use hyper::net::NetworkStream;
use hyper::buffer::BufReader;
#[cfg(feature = "multipart")]
use multipart::server::{HttpRequest, Multipart};
use utils;
use HttpVersion;
use Method;
use header::Headers;
use log::Log;
use Global;
///A container for handler input, like request data and utilities.
pub struct Context<'a, 'b: 'a, 's> {
///Headers from the HTTP request.
pub headers: Headers,
///The HTTP version used in the request.
pub http_version: HttpVersion,
///The client address
pub address: SocketAddr,
///The HTTP method.
pub method: Method,
///The requested path.
pub path: String,
///Hypermedia from the current endpoint.
pub hypermedia: Hypermedia<'s>,
///Route variables.
pub variables: HashMap<String, String>,
///Query variables from the path.
pub query: HashMap<String, String>,
///The fragment part of the URL (after #), if provided.
pub fragment: Option<String>,
///Log for notes, errors and warnings.
pub log: &'s (Log + 's),
///Globally accessible data.
pub global: &'s Global,
///A reader for the request body.
pub body: BodyReader<'a, 'b>,
}
///A reader for a request body.
pub struct BodyReader<'a, 'b: 'a> {
reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>,
#[cfg(feature = "multipart")]
multipart_boundary: Option<String>
}
#[cfg(feature = "multipart")]
impl<'a, 'b> BodyReader<'a, 'b> {
///Try to create a `multipart/form-data` reader from the request body.
///
///```
///# extern crate rustful;
///# extern crate multipart;
///use std::fmt::Write;
///use rustful::{Context, Response};
///use rustful::StatusCode::BadRequest;
///use multipart::server::MultipartData;
///
///fn my_handler(mut context: Context, mut response: Response) {
/// if let Some(mut multipart) = context.body.as_multipart() {
/// let mut result = String::new();
///
/// //Iterate over the multipart entries and print info about them in `result`
/// multipart.foreach_entry(|entry| match entry.data {
/// MultipartData::Text(text) => {
/// //Found data from a text field
/// writeln!(&mut result, "{}: '{}'", entry.name, text);
/// },
/// MultipartData::File(file) => {
/// //Found an uploaded file
/// if let Some(file_name) = file.filename() {
/// writeln!(&mut result, "{}: a file called '{}'", entry.name, file_name);
/// } else {
/// writeln!(&mut result, "{}: a nameless file", entry.name);
/// }
/// }
/// });
///
/// response.send(result);
/// } else {
/// //We expected it to be a valid `multipart/form-data` request, but it was not
/// response.set_status(BadRequest);
/// }
///}
///# fn main() {}
///```
pub fn as_multipart<'r>(&'r mut self) -> Option<Multipart<MultipartRequest<'r, 'a, 'b>>> {
let reader = &mut self.reader;
self.multipart_boundary.as_ref().and_then(move |boundary|
Multipart::from_request(MultipartRequest {
boundary: boundary,
reader: reader
}).ok()
)
}
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, headers: &Headers) -> BodyReader<'a, 'b> {
use header::ContentType;
use mime::{Mime, TopLevel, SubLevel, Attr, Value};
let boundary = match headers.get() {
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, ref attrs))) => {
attrs.iter()
.find(|&&(ref attr, _)| attr == &Attr::Boundary)
.and_then(|&(_, ref val)| if let Value::Ext(ref boundary) = *val | else {
None
})
},
_ => None
};
BodyReader {
reader: reader,
multipart_boundary: boundary
}
}
}
#[cfg(not(feature = "multipart"))]
impl<'a, 'b> BodyReader<'a, 'b> {
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, _headers: &Headers) -> BodyReader<'a, 'b> {
BodyReader {
reader: reader
}
}
}
///`BodyReader` extension for reading and parsing a query string.
///
///Examples and more information can be found in [the documentation for
///`BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
pub trait ExtQueryBody {
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>>;
}
impl<'a, 'b> ExtQueryBody for BodyReader<'a, 'b> {
///Read and parse the request body as a query string. The body will be
///decoded as UTF-8 and plain '+' characters will be replaced with spaces.
///
///A simplified example of how to parse `a=number&b=number`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtQueryBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as a query string
/// let query = context.body.read_query_body().unwrap();
///
/// //Find "a" and "b" and assume that they are numbers
/// let a: f64 = query.get("a").and_then(|number| number.parse().ok()).unwrap();
/// let b: f64 = query.get("b").and_then(|number| number.parse().ok()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
#[inline]
fn read_query_body(&mut self) -> io::Result<HashMap<String, | {
Some(boundary.clone())
} | conditional_block |
context.rs | Result<&'static str, &'static str> { Ok("yo") }
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! match something_that_may_fail() {
//! Ok(res) => response.send(res),
//! Err(e) => {
//! context.log.error(&format!("it failed! {}", e));
//! response.set_status(InternalServerError);
//! }
//! }
//!}
//!```
//!
//!##Global Data
//!
//!There is also infrastructure for globally accessible data, that can be
//!accessed through the `global` field. This is meant to provide a place for
//!things like database connections or cached data that should be available to
//!all handlers. The storage space itself is immutable when the server has
//!started, so the only way to change it is through some kind of inner
//!mutability.
//!
//!```
//!use rustful::{Context, Response};
//!use rustful::StatusCode::InternalServerError;
//!
//!fn my_handler(context: Context, mut response: Response) {
//! if let Some(some_wise_words) = context.global.get::<&str>() {
//! response.send(format!("food for thought: {}", some_wise_words));
//! } else {
//! context.log.error("there should be a string literal in `global`");
//! response.set_status(InternalServerError);
//! }
//!}
//!```
//!
//!##Request Body
//!
//!The body will not be read in advance, unlike the other parts of the
//!request. It is instead available as a `BodyReader` in the field `body`,
//!through which it can be read and parsed as various data formats, like JSON
//!and query strings. The documentation for [`BodyReader`][body_reader] gives
//!more examples.
//!
//!```
//!use std::io::{BufReader, BufRead};
//!use rustful::{Context, Response};
//!
//!fn my_handler(context: Context, response: Response) {
//! let mut numbered_lines = BufReader::new(context.body).lines().enumerate();
//! let mut writer = response.into_chunked();
//!
//! while let Some((line_no, Ok(line))) = numbered_lines.next() {
//! writer.send(format!("{}: {}", line_no + 1, line));
//! }
//!}
//!```
//!
//![context]: struct.Context.html
//![headers]: ../header/struct.Headers.html
//![log]: ../log/index.html
//![body_reader]: struct.BodyReader.html
use std::collections::HashMap;
use std::io::{self, Read};
use std::net::SocketAddr;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::json;
#[cfg(feature = "rustc_json_body")]
use rustc_serialize::Decodable;
use hyper::http::h1::HttpReader;
use hyper::net::NetworkStream;
use hyper::buffer::BufReader;
#[cfg(feature = "multipart")]
use multipart::server::{HttpRequest, Multipart};
use utils;
use HttpVersion;
use Method;
use header::Headers;
use log::Log;
use Global;
///A container for handler input, like request data and utilities.
pub struct Context<'a, 'b: 'a, 's> {
///Headers from the HTTP request.
pub headers: Headers,
///The HTTP version used in the request.
pub http_version: HttpVersion,
///The client address
pub address: SocketAddr,
///The HTTP method.
pub method: Method,
///The requested path.
pub path: String,
///Hypermedia from the current endpoint.
pub hypermedia: Hypermedia<'s>,
///Route variables.
pub variables: HashMap<String, String>,
///Query variables from the path.
pub query: HashMap<String, String>,
///The fragment part of the URL (after #), if provided.
pub fragment: Option<String>,
///Log for notes, errors and warnings.
pub log: &'s (Log + 's),
///Globally accessible data.
pub global: &'s Global,
///A reader for the request body.
pub body: BodyReader<'a, 'b>,
}
///A reader for a request body.
pub struct | <'a, 'b: 'a> {
reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>,
#[cfg(feature = "multipart")]
multipart_boundary: Option<String>
}
#[cfg(feature = "multipart")]
impl<'a, 'b> BodyReader<'a, 'b> {
///Try to create a `multipart/form-data` reader from the request body.
///
///```
///# extern crate rustful;
///# extern crate multipart;
///use std::fmt::Write;
///use rustful::{Context, Response};
///use rustful::StatusCode::BadRequest;
///use multipart::server::MultipartData;
///
///fn my_handler(mut context: Context, mut response: Response) {
/// if let Some(mut multipart) = context.body.as_multipart() {
/// let mut result = String::new();
///
/// //Iterate over the multipart entries and print info about them in `result`
/// multipart.foreach_entry(|entry| match entry.data {
/// MultipartData::Text(text) => {
/// //Found data from a text field
/// writeln!(&mut result, "{}: '{}'", entry.name, text);
/// },
/// MultipartData::File(file) => {
/// //Found an uploaded file
/// if let Some(file_name) = file.filename() {
/// writeln!(&mut result, "{}: a file called '{}'", entry.name, file_name);
/// } else {
/// writeln!(&mut result, "{}: a nameless file", entry.name);
/// }
/// }
/// });
///
/// response.send(result);
/// } else {
/// //We expected it to be a valid `multipart/form-data` request, but it was not
/// response.set_status(BadRequest);
/// }
///}
///# fn main() {}
///```
pub fn as_multipart<'r>(&'r mut self) -> Option<Multipart<MultipartRequest<'r, 'a, 'b>>> {
let reader = &mut self.reader;
self.multipart_boundary.as_ref().and_then(move |boundary|
Multipart::from_request(MultipartRequest {
boundary: boundary,
reader: reader
}).ok()
)
}
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, headers: &Headers) -> BodyReader<'a, 'b> {
use header::ContentType;
use mime::{Mime, TopLevel, SubLevel, Attr, Value};
let boundary = match headers.get() {
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, ref attrs))) => {
attrs.iter()
.find(|&&(ref attr, _)| attr == &Attr::Boundary)
.and_then(|&(_, ref val)| if let Value::Ext(ref boundary) = *val {
Some(boundary.clone())
} else {
None
})
},
_ => None
};
BodyReader {
reader: reader,
multipart_boundary: boundary
}
}
}
#[cfg(not(feature = "multipart"))]
impl<'a, 'b> BodyReader<'a, 'b> {
///Internal method that may change unexpectedly.
pub fn from_reader(reader: HttpReader<&'a mut BufReader<&'b mut NetworkStream>>, _headers: &Headers) -> BodyReader<'a, 'b> {
BodyReader {
reader: reader
}
}
}
///`BodyReader` extension for reading and parsing a query string.
///
///Examples and more information can be found in [the documentation for
///`BodyReader`][body_reader].
///
///[body_reader]: struct.BodyReader.html
pub trait ExtQueryBody {
fn read_query_body(&mut self) -> io::Result<HashMap<String, String>>;
}
impl<'a, 'b> ExtQueryBody for BodyReader<'a, 'b> {
///Read and parse the request body as a query string. The body will be
///decoded as UTF-8 and plain '+' characters will be replaced with spaces.
///
///A simplified example of how to parse `a=number&b=number`:
///
///```
///use rustful::{Context, Response};
///use rustful::context::ExtQueryBody;
///
///fn my_handler(mut context: Context, response: Response) {
/// //Parse the request body as a query string
/// let query = context.body.read_query_body().unwrap();
///
/// //Find "a" and "b" and assume that they are numbers
/// let a: f64 = query.get("a").and_then(|number| number.parse().ok()).unwrap();
/// let b: f64 = query.get("b").and_then(|number| number.parse().ok()).unwrap();
///
/// response.send(format!("{} + {} = {}", a, b, a + b));
///}
///```
#[inline]
fn read_query_body(&mut self) -> io::Result<HashMap<String, | BodyReader | identifier_name |
local_cache.rs | Path, manifest_base: &Path,
data: &Path, only_cached: bool, status: &mut StatusBackend
) -> Result<LocalCache<B>> {
// If the `digest` file exists, we assume that it is valid; this is
// *essential* so that we can use a URL as our default IoProvider
// without requiring a network connection to run. If it does not
// exist, we need to query the backend.
let (digest_text, cached_digest, checked_digest) = match File::open(digest) {
Ok(f) => {
let mut text = String::new();
f.take(64).read_to_string(&mut text)?;
let cached_digest = ctry!(DigestData::from_str(&text); "corrupted SHA256 digest cache");
(text, cached_digest, false)
},
Err(e) => {
if e.kind() != IoErrorKind::NotFound {
// Unexpected error reading the digest cache file. Ruh roh!
return Err(e.into());
}
// Digest file just doesn't exist. We need to query the backend for it.
let cached_digest = ctry!(backend.get_digest(status); "could not get backend summary digest");
let text = cached_digest.to_string();
(text, cached_digest, true)
}
};
if checked_digest {
// If checked_digest is true, the digest cache file did not exist
// and we got the text fresh from the backend. So, we should write
// it out to the cache file.
let mut f = File::create(&digest)?;
writeln!(f, "{}", digest_text)?;
}
// We can now figure out which manifest to use.
let mut manifest_path = manifest_base.to_owned();
manifest_path.push(&digest_text);
manifest_path.set_extension("txt");
// Read it in, if it exists.
let mut contents = HashMap::new();
match try_open_file(&manifest_path) {
OpenResult::NotAvailable => {},
OpenResult::Err(e) => { return Err(e.into()); },
OpenResult::Ok(mfile) => {
// Note that the lock is released when the file is closed,
// which is good since BufReader::new() and BufReader::lines()
// consume their objects.
if let Err(e) = mfile.lock_shared() {
tt_warning!(status, "failed to lock manifest file \"{}\" for reading; this might be fine",
manifest_path.display(); e.into());
}
let f = BufReader::new(mfile);
for res in f.lines() {
let line = res?;
let mut bits = line.rsplitn(3, ' ');
let (original_name, length, digest) = match (bits.next(), bits.next(),
bits.next(), bits.next()) {
(Some(s), Some(t), Some(r), None) => (r, t, s),
_ => continue,
};
let name = OsString::from(original_name);
let length = match length.parse::<u64>() {
Ok(l) => l,
Err(_) => continue
};
let digest = if digest == "-" {
None
} else {
match DigestData::from_str(&digest) {
Ok(d) => Some(d),
Err(e) => {
tt_warning!(status, "ignoring bad digest data \"{}\" for \"{}\" in \"{}\"",
&digest, original_name, manifest_path.display() ; e);
continue;
}
}
};
contents.insert(name, LocalCacheItem { _length: length, digest: digest });
}
}
}
// All set.
Ok(LocalCache {
backend: backend,
digest_path: digest.to_owned(),
cached_digest: cached_digest,
checked_digest: checked_digest,
manifest_path: manifest_path,
data_path: data.to_owned(),
contents: contents,
only_cached: only_cached,
})
}
fn record_cache_result(&mut self, name: &OsStr, length: u64, digest: Option<DigestData>) -> Result<()> {
let digest_text = match digest {
Some(ref d) => d.to_string(),
None => "-".to_owned(),
};
// Due to a quirk about permissions for file locking on Windows, we
// need to add `.read(true)` to be able to lock a file opened in
// append mode.
let mut man = fs::OpenOptions::new()
.append(true)
.create(true)
.read(true)
.open(&self.manifest_path)?;
// Lock will be released when file is closed at the end of this function.
ctry!(man.lock_exclusive(); "failed to lock manifest file \"{}\" for writing", self.manifest_path.display());
if let Some(name_utf8) = name.to_str() {
if !name_utf8.contains(|c| c == '\n' || c == '\r') {
writeln!(man, "{} {} {}", name_utf8, length, digest_text)?;
}
}
self.contents.insert(name.to_owned(), LocalCacheItem { _length: length, digest: digest });
Ok(())
}
/// If we're going to make a request of the backend, we should check that
/// its digest is what we expect. If not, we do a lame thing where we
/// error out but set things up so that things should succeed if the
/// program is re-run. Exactly the lame TeX user experience that I've been
/// trying to avoid!
fn check_digest(&mut self, status: &mut StatusBackend) -> Result<()> {
if self.checked_digest {
return Ok(());
}
let dtext = match self.backend.input_open_name(OsStr::new("SHA256SUM"), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
ctry!(h.take(64).read_to_string(&mut text); "error reading {}", self.digest_path.to_string_lossy());
text
},
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg("backend does not provide needed SHA256SUM file".to_owned()).into());
},
OpenResult::Err(e) => {
return Err(e.into());
}
};
let current_digest = ctry!(DigestData::from_str(&dtext); "bad SHA256 digest from backend");
if self.cached_digest != current_digest {
// Crap! The backend isn't what we thought it was. Rewrite the
// digest file so that next time we'll start afresh.
let mut f = ctry!(File::create(&self.digest_path); "couldn\'t open {} for writing",
self.digest_path.to_string_lossy());
ctry!(writeln!(f, "{}", current_digest.to_string()); "couldn\'t write to {}",
self.digest_path.to_string_lossy());
return Err(ErrorKind::Msg("backend digest changed; rerun to use updated information".to_owned()).into());
}
// Phew, the backend hasn't changed. Don't check again.
self.checked_digest = true;
Ok(())
}
fn path_for_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<PathBuf> {
if let Some(info) = self.contents.get(name) {
return match info.digest {
None => OpenResult::NotAvailable,
Some(ref d) => match d.create_two_part_path(&self.data_path) {
Ok(p) => OpenResult::Ok(p),
Err(e) => OpenResult::Err(e.into()),
},
};
}
// The file is not in the cache and we are asked not to try to fetch it.
if self.only_cached {
return OpenResult::NotAvailable;
}
// Bummer, we haven't seen this file before. We need to (try to) fetch
// the item from the backend, saving it to disk and calculating its
// digest ourselves, then enter it in the cache and in our manifest.
// Fun times. Because we're touching the backend, we need to verify that
// its digest is what we think.
if let Err(e) = self.check_digest(status) {
return OpenResult::Err(e);
}
// The bundle's overall digest is OK. Now try open the file. If it's
// not available, cache that result, since LaTeX compilations commonly
// touch nonexistent files. If we didn't maintain the negative cache,
// we'd have to touch the network for virtually every compilation.
let mut stream = match self.backend.input_open_name (name, status) {
OpenResult::Ok(s) => s,
OpenResult::Err(e) => return OpenResult::Err(e),
OpenResult::NotAvailable => {
if let Err(e) = self.record_cache_result(name, 0, None) |
return OpenResult::NotAvailable;
}
};
// OK, we can stream the file to a temporary location on disk,
// computing its SHA256 as we go.
let mut digest_builder = digest::create();
let mut length = 0;
let mut temp_dest = match tempfile::Builder::new()
.prefix("download_")
.rand_bytes(6)
.tempfile_in(&self.data_path) {
| {
return OpenResult::Err(e.into());
} | conditional_block |
local_cache.rs | Path, manifest_base: &Path,
data: &Path, only_cached: bool, status: &mut StatusBackend
) -> Result<LocalCache<B>> {
// If the `digest` file exists, we assume that it is valid; this is
// *essential* so that we can use a URL as our default IoProvider
// without requiring a network connection to run. If it does not
// exist, we need to query the backend.
let (digest_text, cached_digest, checked_digest) = match File::open(digest) {
Ok(f) => {
let mut text = String::new();
f.take(64).read_to_string(&mut text)?;
let cached_digest = ctry!(DigestData::from_str(&text); "corrupted SHA256 digest cache");
(text, cached_digest, false)
},
Err(e) => {
if e.kind() != IoErrorKind::NotFound {
// Unexpected error reading the digest cache file. Ruh roh!
return Err(e.into());
}
// Digest file just doesn't exist. We need to query the backend for it.
let cached_digest = ctry!(backend.get_digest(status); "could not get backend summary digest");
let text = cached_digest.to_string();
(text, cached_digest, true)
}
};
if checked_digest {
// If checked_digest is true, the digest cache file did not exist
// and we got the text fresh from the backend. So, we should write
// it out to the cache file.
let mut f = File::create(&digest)?;
writeln!(f, "{}", digest_text)?;
}
// We can now figure out which manifest to use.
let mut manifest_path = manifest_base.to_owned();
manifest_path.push(&digest_text);
manifest_path.set_extension("txt");
// Read it in, if it exists.
let mut contents = HashMap::new();
match try_open_file(&manifest_path) {
OpenResult::NotAvailable => {},
OpenResult::Err(e) => { return Err(e.into()); },
OpenResult::Ok(mfile) => {
// Note that the lock is released when the file is closed,
// which is good since BufReader::new() and BufReader::lines()
// consume their objects.
if let Err(e) = mfile.lock_shared() {
tt_warning!(status, "failed to lock manifest file \"{}\" for reading; this might be fine",
manifest_path.display(); e.into());
}
let f = BufReader::new(mfile);
for res in f.lines() {
let line = res?;
let mut bits = line.rsplitn(3, ' ');
let (original_name, length, digest) = match (bits.next(), bits.next(),
bits.next(), bits.next()) {
(Some(s), Some(t), Some(r), None) => (r, t, s),
_ => continue,
};
let name = OsString::from(original_name);
let length = match length.parse::<u64>() {
Ok(l) => l,
Err(_) => continue
};
let digest = if digest == "-" {
None
} else {
match DigestData::from_str(&digest) {
Ok(d) => Some(d),
Err(e) => {
tt_warning!(status, "ignoring bad digest data \"{}\" for \"{}\" in \"{}\"",
&digest, original_name, manifest_path.display() ; e);
continue;
}
}
};
contents.insert(name, LocalCacheItem { _length: length, digest: digest });
}
}
}
// All set.
Ok(LocalCache {
backend: backend,
digest_path: digest.to_owned(),
cached_digest: cached_digest,
checked_digest: checked_digest,
manifest_path: manifest_path,
data_path: data.to_owned(),
contents: contents,
only_cached: only_cached,
})
}
fn | (&mut self, name: &OsStr, length: u64, digest: Option<DigestData>) -> Result<()> {
let digest_text = match digest {
Some(ref d) => d.to_string(),
None => "-".to_owned(),
};
// Due to a quirk about permissions for file locking on Windows, we
// need to add `.read(true)` to be able to lock a file opened in
// append mode.
let mut man = fs::OpenOptions::new()
.append(true)
.create(true)
.read(true)
.open(&self.manifest_path)?;
// Lock will be released when file is closed at the end of this function.
ctry!(man.lock_exclusive(); "failed to lock manifest file \"{}\" for writing", self.manifest_path.display());
if let Some(name_utf8) = name.to_str() {
if !name_utf8.contains(|c| c == '\n' || c == '\r') {
writeln!(man, "{} {} {}", name_utf8, length, digest_text)?;
}
}
self.contents.insert(name.to_owned(), LocalCacheItem { _length: length, digest: digest });
Ok(())
}
/// If we're going to make a request of the backend, we should check that
/// its digest is what we expect. If not, we do a lame thing where we
/// error out but set things up so that things should succeed if the
/// program is re-run. Exactly the lame TeX user experience that I've been
/// trying to avoid!
fn check_digest(&mut self, status: &mut StatusBackend) -> Result<()> {
if self.checked_digest {
return Ok(());
}
let dtext = match self.backend.input_open_name(OsStr::new("SHA256SUM"), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
ctry!(h.take(64).read_to_string(&mut text); "error reading {}", self.digest_path.to_string_lossy());
text
},
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg("backend does not provide needed SHA256SUM file".to_owned()).into());
},
OpenResult::Err(e) => {
return Err(e.into());
}
};
let current_digest = ctry!(DigestData::from_str(&dtext); "bad SHA256 digest from backend");
if self.cached_digest != current_digest {
// Crap! The backend isn't what we thought it was. Rewrite the
// digest file so that next time we'll start afresh.
let mut f = ctry!(File::create(&self.digest_path); "couldn\'t open {} for writing",
self.digest_path.to_string_lossy());
ctry!(writeln!(f, "{}", current_digest.to_string()); "couldn\'t write to {}",
self.digest_path.to_string_lossy());
return Err(ErrorKind::Msg("backend digest changed; rerun to use updated information".to_owned()).into());
}
// Phew, the backend hasn't changed. Don't check again.
self.checked_digest = true;
Ok(())
}
fn path_for_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<PathBuf> {
if let Some(info) = self.contents.get(name) {
return match info.digest {
None => OpenResult::NotAvailable,
Some(ref d) => match d.create_two_part_path(&self.data_path) {
Ok(p) => OpenResult::Ok(p),
Err(e) => OpenResult::Err(e.into()),
},
};
}
// The file is not in the cache and we are asked not to try to fetch it.
if self.only_cached {
return OpenResult::NotAvailable;
}
// Bummer, we haven't seen this file before. We need to (try to) fetch
// the item from the backend, saving it to disk and calculating its
// digest ourselves, then enter it in the cache and in our manifest.
// Fun times. Because we're touching the backend, we need to verify that
// its digest is what we think.
if let Err(e) = self.check_digest(status) {
return OpenResult::Err(e);
}
// The bundle's overall digest is OK. Now try open the file. If it's
// not available, cache that result, since LaTeX compilations commonly
// touch nonexistent files. If we didn't maintain the negative cache,
// we'd have to touch the network for virtually every compilation.
let mut stream = match self.backend.input_open_name (name, status) {
OpenResult::Ok(s) => s,
OpenResult::Err(e) => return OpenResult::Err(e),
OpenResult::NotAvailable => {
if let Err(e) = self.record_cache_result(name, 0, None) {
return OpenResult::Err(e.into());
}
return OpenResult::NotAvailable;
}
};
// OK, we can stream the file to a temporary location on disk,
// computing its SHA256 as we go.
let mut digest_builder = digest::create();
let mut length = 0;
let mut temp_dest = match tempfile::Builder::new()
.prefix("download_")
.rand_bytes(6)
.tempfile_in(&self.data_path) {
| record_cache_result | identifier_name |
local_cache.rs | Path, manifest_base: &Path,
data: &Path, only_cached: bool, status: &mut StatusBackend
) -> Result<LocalCache<B>> {
// If the `digest` file exists, we assume that it is valid; this is
// *essential* so that we can use a URL as our default IoProvider
// without requiring a network connection to run. If it does not
// exist, we need to query the backend.
let (digest_text, cached_digest, checked_digest) = match File::open(digest) {
Ok(f) => {
let mut text = String::new();
f.take(64).read_to_string(&mut text)?;
let cached_digest = ctry!(DigestData::from_str(&text); "corrupted SHA256 digest cache");
(text, cached_digest, false)
},
Err(e) => {
if e.kind() != IoErrorKind::NotFound {
// Unexpected error reading the digest cache file. Ruh roh!
return Err(e.into());
}
// Digest file just doesn't exist. We need to query the backend for it.
let cached_digest = ctry!(backend.get_digest(status); "could not get backend summary digest");
let text = cached_digest.to_string();
(text, cached_digest, true)
}
};
if checked_digest {
// If checked_digest is true, the digest cache file did not exist
// and we got the text fresh from the backend. So, we should write
// it out to the cache file.
let mut f = File::create(&digest)?;
writeln!(f, "{}", digest_text)?;
}
// We can now figure out which manifest to use.
let mut manifest_path = manifest_base.to_owned();
manifest_path.push(&digest_text);
manifest_path.set_extension("txt");
// Read it in, if it exists.
let mut contents = HashMap::new();
match try_open_file(&manifest_path) {
OpenResult::NotAvailable => {},
OpenResult::Err(e) => { return Err(e.into()); },
OpenResult::Ok(mfile) => {
// Note that the lock is released when the file is closed,
// which is good since BufReader::new() and BufReader::lines()
// consume their objects.
if let Err(e) = mfile.lock_shared() {
tt_warning!(status, "failed to lock manifest file \"{}\" for reading; this might be fine",
manifest_path.display(); e.into());
}
let f = BufReader::new(mfile);
for res in f.lines() {
let line = res?;
let mut bits = line.rsplitn(3, ' ');
let (original_name, length, digest) = match (bits.next(), bits.next(),
bits.next(), bits.next()) {
(Some(s), Some(t), Some(r), None) => (r, t, s),
_ => continue,
};
let name = OsString::from(original_name);
let length = match length.parse::<u64>() {
Ok(l) => l,
Err(_) => continue
};
let digest = if digest == "-" {
None
} else {
match DigestData::from_str(&digest) {
Ok(d) => Some(d),
Err(e) => {
tt_warning!(status, "ignoring bad digest data \"{}\" for \"{}\" in \"{}\"",
&digest, original_name, manifest_path.display() ; e);
continue;
}
}
};
contents.insert(name, LocalCacheItem { _length: length, digest: digest });
}
}
}
// All set.
Ok(LocalCache {
backend: backend,
digest_path: digest.to_owned(),
cached_digest: cached_digest,
checked_digest: checked_digest,
manifest_path: manifest_path,
data_path: data.to_owned(),
contents: contents,
only_cached: only_cached,
})
}
fn record_cache_result(&mut self, name: &OsStr, length: u64, digest: Option<DigestData>) -> Result<()> {
let digest_text = match digest {
Some(ref d) => d.to_string(),
None => "-".to_owned(),
};
// Due to a quirk about permissions for file locking on Windows, we
// need to add `.read(true)` to be able to lock a file opened in
// append mode.
let mut man = fs::OpenOptions::new()
.append(true)
.create(true)
.read(true)
.open(&self.manifest_path)?;
// Lock will be released when file is closed at the end of this function.
ctry!(man.lock_exclusive(); "failed to lock manifest file \"{}\" for writing", self.manifest_path.display());
if let Some(name_utf8) = name.to_str() {
if !name_utf8.contains(|c| c == '\n' || c == '\r') {
writeln!(man, "{} {} {}", name_utf8, length, digest_text)?;
}
}
self.contents.insert(name.to_owned(), LocalCacheItem { _length: length, digest: digest });
Ok(())
}
/// If we're going to make a request of the backend, we should check that
/// its digest is what we expect. If not, we do a lame thing where we
/// error out but set things up so that things should succeed if the
/// program is re-run. Exactly the lame TeX user experience that I've been
/// trying to avoid!
fn check_digest(&mut self, status: &mut StatusBackend) -> Result<()> {
if self.checked_digest {
return Ok(());
}
let dtext = match self.backend.input_open_name(OsStr::new("SHA256SUM"), status) {
OpenResult::Ok(h) => {
let mut text = String::new();
ctry!(h.take(64).read_to_string(&mut text); "error reading {}", self.digest_path.to_string_lossy());
text
},
OpenResult::NotAvailable => {
// Broken or un-cacheable backend.
return Err(ErrorKind::Msg("backend does not provide needed SHA256SUM file".to_owned()).into());
},
OpenResult::Err(e) => {
return Err(e.into());
}
};
let current_digest = ctry!(DigestData::from_str(&dtext); "bad SHA256 digest from backend");
if self.cached_digest != current_digest {
// Crap! The backend isn't what we thought it was. Rewrite the
// digest file so that next time we'll start afresh.
let mut f = ctry!(File::create(&self.digest_path); "couldn\'t open {} for writing",
self.digest_path.to_string_lossy());
ctry!(writeln!(f, "{}", current_digest.to_string()); "couldn\'t write to {}",
self.digest_path.to_string_lossy());
return Err(ErrorKind::Msg("backend digest changed; rerun to use updated information".to_owned()).into());
}
// Phew, the backend hasn't changed. Don't check again.
self.checked_digest = true;
Ok(())
}
fn path_for_name(&mut self, name: &OsStr, status: &mut StatusBackend) -> OpenResult<PathBuf> {
if let Some(info) = self.contents.get(name) {
return match info.digest {
None => OpenResult::NotAvailable,
Some(ref d) => match d.create_two_part_path(&self.data_path) {
Ok(p) => OpenResult::Ok(p),
Err(e) => OpenResult::Err(e.into()),
},
};
}
// The file is not in the cache and we are asked not to try to fetch it.
if self.only_cached {
return OpenResult::NotAvailable;
}
// Bummer, we haven't seen this file before. We need to (try to) fetch
// the item from the backend, saving it to disk and calculating its
// digest ourselves, then enter it in the cache and in our manifest.
// Fun times. Because we're touching the backend, we need to verify that
// its digest is what we think.
if let Err(e) = self.check_digest(status) {
return OpenResult::Err(e);
}
// The bundle's overall digest is OK. Now try open the file. If it's
// not available, cache that result, since LaTeX compilations commonly
// touch nonexistent files. If we didn't maintain the negative cache,
// we'd have to touch the network for virtually every compilation.
let mut stream = match self.backend.input_open_name (name, status) {
OpenResult::Ok(s) => s,
OpenResult::Err(e) => return OpenResult::Err(e),
OpenResult::NotAvailable => {
if let Err(e) = self.record_cache_result(name, 0, None) {
return OpenResult::Err(e.into());
}
return OpenResult::NotAvailable;
}
};
// OK, we can stream the file to a temporary location on disk, | let mut temp_dest = match tempfile::Builder::new()
.prefix("download_")
.rand_bytes(6)
.tempfile_in(&self.data_path) {
| // computing its SHA256 as we go.
let mut digest_builder = digest::create();
let mut length = 0;
| random_line_split |
mod.rs | => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: See above
warn!("No previous link found");
}
}
fn download_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
DownloadLink(href)
}
// Handle the key press event for the hint mode.
// This hides the hints that are not relevant anymore.
fn enter_hint_key(&mut self, key: char) {
self.model.hint_keys.push(key);
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
// If no element is found, hide the unrelevant hints.
if element.is_some() {
// TODO: perhaps it'd involve less message if we remove the ActivateHint message.
self.send(ClickHintElement());
}
else {
let document = self.model.page.dom_document();
if let Some(document) = document {
let all_hidden = hide_unrelevant_hints(&document, &self.model.hint_keys);
if all_hidden {
self.model.hint_keys.clear();
show_all_hints(&document);
}
}
}
}
// Focus the first input element.
fn focus_input(&mut self) {
let document = self.model.page.dom_document();
if let Some(document) = document {
let tag_names = ["input", "textarea"];
let mut element_to_focus = None;
let mut element_y_pos = f32::INFINITY;
for tag_name in &tag_names {
let iter = get_elements_by_tag_name_in_all_frames(&document, tag_name);
for (document, element) in iter {
let tabindex = element.attribute("tabindex").map(Into::into);
if !is_hidden(&document, &element) && is_enabled(&element) && is_text_input(&element)
&& tabindex != Some("-1".to_string())
{
if let Some(pos) = get_position(&element) {
// TODO: If y is equal, compare x?
if pos.y < element_y_pos {
element_y_pos = pos.y;
element_to_focus = Some(element);
}
}
}
}
}
if let Some(element) = element_to_focus {
element.focus();
element.scroll_into_view_if_needed(false);
self.send(EnterInsertMode());
}
}
}
// Hide all the hints.
fn hide_hints(&self) {
let elements =
self.model.page.dom_document()
.and_then(|document| document.element_by_id(HINTS_ID))
.and_then(|hints| get_hints_container(&self.model.page).map(|container| (hints, container)));
if let Some((hints, container)) = elements {
check_err!(container.remove_child(&hints));
}
}
fn hover(&mut self, element: DOMHTMLElement) -> Action {
if let Some(ref element) = self.model.last_hovered_element {
mouse_out(element);
}
self.model.last_hovered_element = Some(element.clone().upcast());
mouse_over(&element.upcast());
NoAction
}
fn insert_text(&self, text: &str) {
let document = get_document!(self);
let active_element = wtry_opt_no_ret!(document.active_element());
let element = wtry_no_show!(active_element.downcast::<DOMHTMLInputElement>());
element.set_value(text);
}
// Load the username and the password in the login form.
fn load_username_pass(&self, username: &str, password: &str) {
let document = get_document!(self);
load_username(&document, username);
load_password(&document, password);
}
// Set the selected file on the input[type="file"].
fn select_file(&mut self, file: &str) {
if let Some(ref input_file) = self.model.activated_file_input.take() {
// FIXME: this is not working.
input_file.set_value(file);
}
}
fn send(& | self | identifier_name |
|
mod.rs | _event_listener_with_closure("scroll", &handler, false);
}
else {
let element = self.model.scroll_element.as_ref().unwrap();
element.add_event_listener_with_closure("scroll", &handler, false);
}
},
MessageRecv(msg) =>
match msg {
ActivateHint(follow_mode, ctrl_key) => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: See above
warn!("No previous link found");
}
}
fn download_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
DownloadLink(href)
}
// Handle the key press event for the hint mode.
// This hides the hints that are not relevant anymore.
fn enter_hint_key(&mut self, key: char) {
self.model.hint_keys.push(key);
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
// If no element is found, hide the unrelevant hints.
if element.is_some() {
// TODO: perhaps it'd involve less message if we remove the ActivateHint message.
self.send(ClickHintElement());
}
else {
let document = self.model.page.dom_document();
if let Some(document) = document {
let all_hidden = hide_unrelevant_hints(&document, &self.model.hint_keys);
if all_hidden {
self.model.hint_keys.clear();
show_all_hints(&document);
}
}
}
}
// Focus the first input element.
fn focus_input(&mut self) {
let document = self.model.page.dom_document();
if let Some(document) = document {
let tag_names = ["input", "textarea"];
let mut element_to_focus = None;
let mut element_y_pos = f32::INFINITY;
for tag_name in &tag_names {
let iter = get_elements_by_tag_name_in_all_frames(&document, tag_name);
for (document, element) in iter {
let tabindex = element.attribute("tabindex").map(Into::into);
if !is_hidden(&document, &element) && is_enabled(&element) && is_text_input(&element)
&& tabindex != Some("-1".to_string())
{
if let Some(pos) = get_position(&element) {
// TODO: If y is equal, compare x?
if pos.y < element_y_pos {
element_y_pos = pos.y;
element_to_focus = Some(element);
}
}
}
}
}
if let Some(element) = element_to_focus {
element.focus();
element.scroll_into_view_if_needed(false);
self.send(EnterInsertMode());
}
}
}
// Hide all the hints.
fn hide_hints(&self) {
let elements =
self.model.page.dom_document()
.and_then(|document| document.element_by_id(HINTS_ID))
.and_then(|hints| get_hints_container(&self.model.page).map(|container| (hints, container)));
if let Some((hints, container)) = elements {
check_err!(container.remove_child(&hints));
}
}
fn hover(&mut self, element: DOMHTMLElement) -> Action {
if let Some(ref element) = self.model.last_hovered_element {
mouse_out(element);
}
self.model.last_hovered_element = Some(element.clone().upcast());
mouse_over(&element.upcast());
NoAction
}
fn insert_text(&self, text: &str) {
let document = get_document!(self);
let active_element = wtry_opt_no_ret!(document.active_element());
let element = wtry_no_show!(active_element.downcast::<DOMHTMLInputElement>());
element.set_value(text);
}
// Load the username and the password in the login form.
fn load_username_pass(&self, username: &str, password: &str) {
| let document = get_document!(self);
load_username(&document, username);
load_password(&document, password);
}
| identifier_body |
|
mod.rs | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
macro_rules! get_document {
($_self:ident) => {{
let document = $_self.model.page.dom_document();
if let Some(document) = document {
document
}
else {
return;
}
}};
}
mod marks;
mod scroll;
use std::collections::HashMap;
use std::f32;
use std::sync::Mutex;
use gio::Cancellable;
use glib::{Cast, Closure, ObjectExt, ToVariant};
use regex::Regex;
use relm::{Relm, Update, UpdateNew};
use webkit2gtk_webextension::{
traits::{
DOMDocumentExt,
DOMDOMSelectionExt,
DOMDOMWindowExt,
DOMElementExt,
DOMEventTargetExt,
DOMHTMLElementExt,
DOMHTMLInputElementExt,
DOMNodeExt,
WebPageExt,
},
DOMElement,
DOMHTMLElement,
DOMHTMLInputElement,
DOMHTMLSelectElement,
DOMHTMLTextAreaElement,
UserMessage,
WebPage,
};
use titanium_common::{FollowMode, InnerMessage, protocol::encode};
use titanium_common::Action::{
self,
CopyLink,
DownloadLink,
FileInput,
GoInInsertMode,
NoAction,
};
use titanium_common::InnerMessage::*;
use dom::{
get_body,
get_elements_by_tag_name_in_all_frames,
get_hints_container,
get_href,
get_position,
is_enabled,
is_hidden,
is_text_input,
mouse_down,
click,
mouse_out,
mouse_over,
match_pattern,
};
use hints::{create_hints, hide_unrelevant_hints, show_all_hints, HINTS_ID};
use login_form::{get_credentials, load_password, load_username, submit_login_form};
use self::Msg::*;
pub struct Executor {
model: Model,
}
pub struct Model {
activated_file_input: Option<DOMHTMLInputElement>,
hint_keys: String,
hint_map: HashMap<String, DOMElement>,
last_hovered_element: Option<DOMElement>,
marks: HashMap<u8, u32>, // Byte to percent.
page: WebPage,
relm: Relm<Executor>,
scroll_element: Option<DOMElement>,
}
#[derive(Msg)]
pub enum Msg {
DocumentLoaded,
MessageRecv(InnerMessage),
Scroll,
}
impl Update for Executor {
type Model = Model;
type ModelParam = WebPage;
type Msg = Msg;
fn model(relm: &Relm<Self>, page: WebPage) -> Model {
Model {
activated_file_input: None,
hint_keys: String::new(),
hint_map: HashMap::new(),
last_hovered_element: None,
marks: HashMap::new(),
page,
relm: relm.clone(),
scroll_element: None,
}
}
fn update(&mut self, message: Msg) {
match message {
DocumentLoaded => {
self.init_scroll_element();
self.send_scroll_percentage();
let stream = self.model.relm.stream().clone();
let stream = Mutex::new(::send_cell::SendCell::new(stream));
let handler = Closure::new(move |_| {
let stream = stream.lock().unwrap();
stream.get().emit(Scroll);
None
});
if self.model.scroll_element == get_body(&self.model.page).map(|el| el.upcast()) {
let document = wtry_opt_no_ret!(self.model.page.dom_document());
document.add_event_listener_with_closure("scroll", &handler, false);
}
else {
let element = self.model.scroll_element.as_ref().unwrap();
element.add_event_listener_with_closure("scroll", &handler, false);
}
},
MessageRecv(msg) =>
match msg {
ActivateHint(follow_mode, ctrl_key) => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | random_line_split |
|
mod.rs | , ctrl_key) => self.activate_hint(follow_mode, ctrl_key),
ActivateSelection() => self.activate_selection(),
ClickNextPage() => self.click_next_page(),
ClickPrevPage() => self.click_prev_page(),
EnterHintKey(key) => self.enter_hint_key(key),
FocusInput() => self.focus_input(),
GetCredentials() => self.send_credentials(),
GoToMark(mark) => self.go_to_mark(mark),
HideHints() => self.hide_hints(),
InsertText(text) => self.insert_text(&text),
LoadUsernamePass(username, password) => self.load_username_pass(&username, &password),
Mark(char) => self.add_mark(char),
ResetMarks() => self.reset_marks(),
ResetScrollElement() => self.reset_scroll_element(),
ScrollBy(pixels) => self.scroll_by(pixels),
ScrollByX(pixels) => self.scroll_by_x(pixels),
ScrollTop() => self.scroll_top(),
ScrollToPercent(percent) => self.scroll_to_percent(percent),
SelectFile(file) => self.select_file(&file),
ShowHints(hint_chars) => self.show_hints(&hint_chars),
SubmitLoginForm() => self.submit_login_form(),
_ => warn!("Unexpected message received: {:?}", msg),
},
Scroll => self.send_scroll_percentage(),
}
}
}
impl UpdateNew for Executor {
fn new(_relm: &Relm<Self>, model: Model) -> Self {
Executor {
model,
}
}
}
impl Executor {
// Activate (click, focus, hover) the selected hint.
fn activate_hint(&mut self, follow_mode: FollowMode, ctrl_key: bool) {
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
match element {
Some(element) => {
self.hide_hints();
self.model.hint_map.clear();
self.model.hint_keys.clear();
let action =
match follow_mode {
FollowMode::Click => self.click(element, ctrl_key),
FollowMode::CopyLink => self.copy_link(element),
FollowMode::Download => self.download_link(element),
FollowMode::Hover => self.hover(element),
};
self.send(ActivateAction(action));
},
None => self.send(ActivateAction(NoAction)),
}
}
// Click on the link of the selected text.
fn activate_selection(&self) {
// TODO: switch to using some macros to simplify this code.
let result = self.model.page.dom_document()
.and_then(|document| document.default_view())
.and_then(|window| window.selection())
.and_then(|selection| selection.anchor_node())
.and_then(|anchor_node| anchor_node.parent_element())
.and_then(|parent| parent.downcast::<DOMHTMLElement>().ok());
if let Some(parent) = result {
parent.click();
}
}
fn click(&mut self, element: DOMHTMLElement, ctrl_key: bool) -> Action {
if let Ok(input_element) = element.clone().downcast::<DOMHTMLInputElement>() {
let input_type = input_element.input_type().map(|string| string.to_string()).unwrap_or_default();
match input_type.as_ref() {
"button" | "checkbox" | "image" | "radio" | "reset" | "submit" => {
click(&element.upcast(), ctrl_key);
NoAction
},
// FIXME: file and color not opening.
"color" => NoAction,
"file" => {
self.model.activated_file_input = Some(input_element);
FileInput
},
_ => {
element.focus();
GoInInsertMode
},
}
}
else if element.is::<DOMHTMLTextAreaElement>() {
element.focus();
GoInInsertMode
}
else if element.is::<DOMHTMLSelectElement>() {
if element.attribute("multiple").is_some() {
element.focus();
GoInInsertMode
}
else {
mouse_down(&element.upcast());
NoAction
}
}
else {
click(&element.upcast(), ctrl_key);
NoAction
}
}
fn copy_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
CopyLink(href)
}
fn click_next_page(&mut self) {
let regex = Regex::new(r"(?i:next|forward|older|more|›|»)|(?:<.+>)>(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: Check if url (not text) is *very* similar to our current one
// example.com/page/4 => example.com/page/5
warn!("No next link found");
}
}
fn click_prev_page(&mut self) {
let regex = Regex::new(r"(?i:prev(ious)|back|newer|less|«|‹)|(?:<.+>)<(?:<.+>)").unwrap();
let document = get_document!(self);
if let Some(link) = match_pattern(&document, "a", regex) {
let element = wtry_no_show!(link.clone().downcast::<DOMHTMLElement>());
element.click();
}
else {
// TODO: See above
warn!("No previous link found");
}
}
fn download_link(&self, element: DOMHTMLElement) -> Action {
let href = unwrap_opt_or_ret!(get_href(&element), NoAction);
DownloadLink(href)
}
// Handle the key press event for the hint mode.
// This hides the hints that are not relevant anymore.
fn enter_hint_key(&mut self, key: char) {
self.model.hint_keys.push(key);
let element = self.model.hint_map.get(&self.model.hint_keys)
.and_then(|element| element.clone().downcast::<DOMHTMLElement>().ok());
// If no element is found, hide the unrelevant hints.
if element.is_some() {
// TODO: perhaps it'd involve less message if we remove the ActivateHint message.
self.send(ClickHintElement());
}
else {
let document = self.model.page.dom_document();
if let Some(document) = document {
let all_hidden = hide_unrelevant_hints(&document, &self.model.hint_keys);
if all_hidden {
self.model.hint_keys.clear();
show_all_hints(&document);
}
}
}
}
// Focus the first input element.
fn focus_input(&mut self) {
let document = self.model.page.dom_document();
if let Some(document) = document {
let tag_names = ["input", "textarea"];
let mut element_to_focus = None;
let mut element_y_pos = f32::INFINITY;
for tag_name in &tag_names {
let iter = get_elements_by_tag_name_in_all_frames(&document, tag_name);
for (document, element) in iter {
let tabindex = element.attribute("tabindex").map(Into::into);
if !is_hidden(&document, &element) && is_enabled(&element) && is_text_input(&element)
&& tabindex != Some("-1".to_string())
{
if let Some(pos) = get_position(&element) {
// TODO: If y is equal, compare x?
if pos.y < element_y_pos {
element_y_pos = pos.y;
element_to_focus = Some(element);
}
}
}
}
}
if let Some(element) = element_to_focus {
element.focus();
element.scroll_into_view_if_needed(false);
self.send(EnterInsertMode());
}
}
}
// Hide all the hints.
fn hide_hints(&self) {
let elements =
self.model.page.dom_document()
.and_then(|document| document.element_by_id(HINTS_ID))
.and_then(|hints| get_hints_container(&self.model.page).map(|container| (hints, container)));
if let Some((hints, container)) = elements {
check_err!(container.remove_child(&hints));
}
}
fn hover(&mut self, element: DOMHTMLElement) -> Action {
if let Some(ref element) = self.model.last_hovered_element {
mouse_out(element);
}
self.model.last_hovered_element = Some(element.clone().upcast());
mouse_over(&element.upcast());
NoAction
}
fn insert_text(&self, text: &str) {
let document = get_document!(self);
let active_element = wtry_opt_no_ret!(document.active_element());
let element = wtry_no_show!(active_element.downcast::<DOMHTMLInputElement>());
element.set_value(text);
}
// Load the username and the password in the login form.
fn load_username_pass(&self, username: &str, password: &str) {
let document = get_document!(self);
load_username(&document, username);
load_password(&document, password);
}
// Set the selected file on the input[type="file"].
fn select_file(&mut self, file: &str) {
if let Some(ref input_file) = self.model.activated_file_input.take() {
| // FIXME: this is not working.
input_file.set_value(file);
}
} | conditional_block |
|
BRDF_descriptors.py | author__ = "J Gomez-Dans"
__copyright__ = "Copyright 2017, 2018 J Gomez-Dans"
__license__ = "GPLv3"
__email__ = "[email protected]"
GDAL2NUMPY = {gdal.GDT_Byte: np.uint8,
gdal.GDT_UInt16: np.uint16,
gdal.GDT_Int16: np.int16,
gdal.GDT_UInt32: np.uint32,
gdal.GDT_Int32: np.int32,
gdal.GDT_Float32: np.float32,
gdal.GDT_Float64: np.float64,
gdal.GDT_CInt16: np.complex64,
gdal.GDT_CInt32: np.complex64,
gdal.GDT_CFloat32: np.complex64,
gdal.GDT_CFloat64: np.complex128
}
def find_granules(dire, tile, product, start_time, end_time):
"""Find MCD43 granules based on folder, tile and product type (A1
or A2). Returns a dictionary of datetimes of the products and
granules, or raises an IOError exception if not files found."""
times = []
fnames = []
path = Path(dire)
start_year = start_time.year
end_year = end_time.year
granules_start = path.rglob(f"**/MCD43{product:s}.A{start_year:4d}*.{tile:s}.*.hdf")
granules = [f for f in granules_start]
if end_year != start_year:
granules_end = path.rglob(f"**/MCD43{product:s}.A{end_year:4d}*.{tile:s}.*.hdf")
granules = granules + [f for f in granules_end]
granules = list(set(granules))
if len(granules) == 0:
raise IOError("Couldn't find any MCD43%s files in %s" % (product, dire))
for granule in granules:
fich = os.path.basename (granule)
timex = datetime.datetime.strptime(fich.split(".")[1][1:], "%Y%j")
if timex >= start_time and \
(end_time is None or timex <= end_time ):
times.append (timex)
fnames.append(granule.as_posix())
return dict(list(zip(times, fnames)))
def process_time_input(timestamp):
"""Processes a timestamp given either as (i) a string in
"%Y-%m-%d" format, (ii) a string in "%Y%j" format or
(iii) a datetime.datetime object. Returns a datetime.datetime
ojbect, and raises ValueError if none of the options fits."""
if type(timestamp) == datetime.datetime:
output_time = timestamp
elif type(timestamp) == str:
try:
output_time = datetime.datetime.strptime(timestamp,
"%Y-%m-%d")
except ValueError:
try:
output_time = datetime.datetime.strptime(timestamp,
"%Y%j")
except ValueError:
raise ValueError("The passed timestamp wasn't either " +
'a "%Y-%m-%d" string, a "%Y%j" string')
else:
raise ValueError("You can only use a string or a datetime object")
return output_time
def open_gdal_dataset(fname, roi=None):
g = gdal.Open(fname) | raise IOError("Can't open %s" % fname)
if roi is None:
data = g.ReadAsArray()
else:
ulx, uly, lrx, lry = roi
xoff = ulx
yoff = uly
xcount = lrx - ulx
ycount = lry - uly
data = g.ReadAsArray(xoff, yoff, xcount, ycount).astype(
GDAL2NUMPY[g.GetRasterBand(1).DataType])
return data
def process_masked_kernels(band_no, a1_granule, a2_granule,
band_transfer=None, roi=None):
if band_transfer is not None:
band_no = band_transfer[band_no]
fname_a1 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a1_granule)
fname_a2 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a2_granule)
try:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_Band%d' % (band_no)
except TypeError:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_%s' % (band_no)
fsnow = fname_a2 + 'Snow_BRDF_Albedo'
fland = fname_a2 + 'BRDF_Albedo_LandWaterType'
func = fname_a2 + 'BRDF_Albedo_Uncertainty' # % a2_granule
try:
fqa = fname_a2 + 'BRDF_Albedo_Band_Quality_Band%d' % band_no
except TypeError:
fqa = fname_a1 + 'BRDF_Albedo_Band_Mandatory_Quality_%s' % band_no
for fname in [fdata, fsnow, fland, fqa]:
data = open_gdal_dataset(fname, roi)
if fname.find("Albedo_Parameters") >= 0:
# Read kernels, post process
kernels = process_kernels(data)
elif fname.find("Snow") >= 0:
# Read snow mask... post process
snow = process_snow(data)
elif fname.find("XXXXXLandWaterType") >= 0:
shp = data.shape
land = np.in1d(data, [1, 3, 4, 5]) # data == 1 # Only land
land = land.reshape(shp)
# elif fname.find("BRDF_Albedo_Uncertainty") >= 0:
# unc = process_unc (data)
elif fname.find("BRDF_Albedo_Band_Quality") >= 0 or \
fname.find("BRDF_Albedo_Band_Mandatory_Quality") >= 0:
qa = np.where(data <= 1, True, False) # Best & good
qa_val = data*1
# Create mask:
# 1. Ignore snow
# 2. Only land
# 3. Only good and best
mask = snow * qa # *land * qa
qa_val = np.where(mask, qa_val, np.nan)
return kernels, mask, qa_val
def process_unc(unc):
"""Process uncertainty. Fuck know what it means..."""
unc = np.where(unc == 32767, np.nan, unc/1000.)
def process_snow(snow):
"""Returns True if snow free albedo retrieval"""
return np.where(snow == 0, True, False)
def process_kernels(kernels):
"""Scales the kernels, maybe does other things"""
kernels = np.where(kernels == 32767, np.nan, kernels/1000.)
return kernels
class RetrieveBRDFDescriptors(object):
"""Retrieving BRDF descriptors."""
def __init__(self, tile, mcd43a1_dir, start_time, end_time=None,
mcd43a2_dir=None, roi=None):
"""The class needs to locate the data granules. We assume that
these are available somewhere in the filesystem and that we can
index them by location (MODIS tile name e.g. "h19v10") and
time. The user can give a folder for the MCD43A1 and A2 granules,
and if the second is ignored, it will be assumed that they are
in the same folder. We also need a starting date (either a
datetime object, or a string in "%Y-%m-%d" or "%Y%j" format. If
the end time is not specified, it will be set to the date of the
latest granule found."""
self.tile = tile
self.start_time = process_time_input(start_time)
if end_time is not None:
self.end_time = process_time_input(end_time)
else:
self.end_time = None
if os.path.exists(mcd43a1_dir):
self.mcd43a1_dir = mcd43a1_dir
else:
raise IOError("mcd43a1_dir does not exist!")
self.a1_granules = find_granules(self.mcd43a1_dir, tile, "A1",
self.start_time, self.end_time)
if mcd43a2_dir is None:
self.mcd43a2_dir = mcd43a1_dir
else:
if os.path.exists(mcd43a2_dir):
self.mcd43a2_dir = mcd43a2_dir
else:
raise | if g is None: | random_line_split |
BRDF_descriptors.py | author__ = "J Gomez-Dans"
__copyright__ = "Copyright 2017, 2018 J Gomez-Dans"
__license__ = "GPLv3"
__email__ = "[email protected]"
GDAL2NUMPY = {gdal.GDT_Byte: np.uint8,
gdal.GDT_UInt16: np.uint16,
gdal.GDT_Int16: np.int16,
gdal.GDT_UInt32: np.uint32,
gdal.GDT_Int32: np.int32,
gdal.GDT_Float32: np.float32,
gdal.GDT_Float64: np.float64,
gdal.GDT_CInt16: np.complex64,
gdal.GDT_CInt32: np.complex64,
gdal.GDT_CFloat32: np.complex64,
gdal.GDT_CFloat64: np.complex128
}
def find_granules(dire, tile, product, start_time, end_time):
"""Find MCD43 granules based on folder, tile and product type (A1
or A2). Returns a dictionary of datetimes of the products and
granules, or raises an IOError exception if not files found."""
times = []
fnames = []
path = Path(dire)
start_year = start_time.year
end_year = end_time.year
granules_start = path.rglob(f"**/MCD43{product:s}.A{start_year:4d}*.{tile:s}.*.hdf")
granules = [f for f in granules_start]
if end_year != start_year:
granules_end = path.rglob(f"**/MCD43{product:s}.A{end_year:4d}*.{tile:s}.*.hdf")
granules = granules + [f for f in granules_end]
granules = list(set(granules))
if len(granules) == 0:
raise IOError("Couldn't find any MCD43%s files in %s" % (product, dire))
for granule in granules:
fich = os.path.basename (granule)
timex = datetime.datetime.strptime(fich.split(".")[1][1:], "%Y%j")
if timex >= start_time and \
(end_time is None or timex <= end_time ):
times.append (timex)
fnames.append(granule.as_posix())
return dict(list(zip(times, fnames)))
def process_time_input(timestamp):
"""Processes a timestamp given either as (i) a string in
"%Y-%m-%d" format, (ii) a string in "%Y%j" format or
(iii) a datetime.datetime object. Returns a datetime.datetime
ojbect, and raises ValueError if none of the options fits."""
if type(timestamp) == datetime.datetime:
output_time = timestamp
elif type(timestamp) == str:
try:
output_time = datetime.datetime.strptime(timestamp,
"%Y-%m-%d")
except ValueError:
try:
output_time = datetime.datetime.strptime(timestamp,
"%Y%j")
except ValueError:
raise ValueError("The passed timestamp wasn't either " +
'a "%Y-%m-%d" string, a "%Y%j" string')
else:
raise ValueError("You can only use a string or a datetime object")
return output_time
def open_gdal_dataset(fname, roi=None):
g = gdal.Open(fname)
if g is None:
raise IOError("Can't open %s" % fname)
if roi is None:
data = g.ReadAsArray()
else:
ulx, uly, lrx, lry = roi
xoff = ulx
yoff = uly
xcount = lrx - ulx
ycount = lry - uly
data = g.ReadAsArray(xoff, yoff, xcount, ycount).astype(
GDAL2NUMPY[g.GetRasterBand(1).DataType])
return data
def | (band_no, a1_granule, a2_granule,
band_transfer=None, roi=None):
if band_transfer is not None:
band_no = band_transfer[band_no]
fname_a1 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a1_granule)
fname_a2 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a2_granule)
try:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_Band%d' % (band_no)
except TypeError:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_%s' % (band_no)
fsnow = fname_a2 + 'Snow_BRDF_Albedo'
fland = fname_a2 + 'BRDF_Albedo_LandWaterType'
func = fname_a2 + 'BRDF_Albedo_Uncertainty' # % a2_granule
try:
fqa = fname_a2 + 'BRDF_Albedo_Band_Quality_Band%d' % band_no
except TypeError:
fqa = fname_a1 + 'BRDF_Albedo_Band_Mandatory_Quality_%s' % band_no
for fname in [fdata, fsnow, fland, fqa]:
data = open_gdal_dataset(fname, roi)
if fname.find("Albedo_Parameters") >= 0:
# Read kernels, post process
kernels = process_kernels(data)
elif fname.find("Snow") >= 0:
# Read snow mask... post process
snow = process_snow(data)
elif fname.find("XXXXXLandWaterType") >= 0:
shp = data.shape
land = np.in1d(data, [1, 3, 4, 5]) # data == 1 # Only land
land = land.reshape(shp)
# elif fname.find("BRDF_Albedo_Uncertainty") >= 0:
# unc = process_unc (data)
elif fname.find("BRDF_Albedo_Band_Quality") >= 0 or \
fname.find("BRDF_Albedo_Band_Mandatory_Quality") >= 0:
qa = np.where(data <= 1, True, False) # Best & good
qa_val = data*1
# Create mask:
# 1. Ignore snow
# 2. Only land
# 3. Only good and best
mask = snow * qa # *land * qa
qa_val = np.where(mask, qa_val, np.nan)
return kernels, mask, qa_val
def process_unc(unc):
"""Process uncertainty. Fuck know what it means..."""
unc = np.where(unc == 32767, np.nan, unc/1000.)
def process_snow(snow):
"""Returns True if snow free albedo retrieval"""
return np.where(snow == 0, True, False)
def process_kernels(kernels):
"""Scales the kernels, maybe does other things"""
kernels = np.where(kernels == 32767, np.nan, kernels/1000.)
return kernels
class RetrieveBRDFDescriptors(object):
"""Retrieving BRDF descriptors."""
def __init__(self, tile, mcd43a1_dir, start_time, end_time=None,
mcd43a2_dir=None, roi=None):
"""The class needs to locate the data granules. We assume that
these are available somewhere in the filesystem and that we can
index them by location (MODIS tile name e.g. "h19v10") and
time. The user can give a folder for the MCD43A1 and A2 granules,
and if the second is ignored, it will be assumed that they are
in the same folder. We also need a starting date (either a
datetime object, or a string in "%Y-%m-%d" or "%Y%j" format. If
the end time is not specified, it will be set to the date of the
latest granule found."""
self.tile = tile
self.start_time = process_time_input(start_time)
if end_time is not None:
self.end_time = process_time_input(end_time)
else:
self.end_time = None
if os.path.exists(mcd43a1_dir):
self.mcd43a1_dir = mcd43a1_dir
else:
raise IOError("mcd43a1_dir does not exist!")
self.a1_granules = find_granules(self.mcd43a1_dir, tile, "A1",
self.start_time, self.end_time)
if mcd43a2_dir is None:
self.mcd43a2_dir = mcd43a1_dir
else:
if os.path.exists(mcd43a2_dir):
self.mcd43a2_dir = mcd43a2_dir
else:
| process_masked_kernels | identifier_name |
BRDF_descriptors.py | __author__ = "J Gomez-Dans"
__copyright__ = "Copyright 2017, 2018 J Gomez-Dans"
__license__ = "GPLv3"
__email__ = "[email protected]"
GDAL2NUMPY = {gdal.GDT_Byte: np.uint8,
gdal.GDT_UInt16: np.uint16,
gdal.GDT_Int16: np.int16,
gdal.GDT_UInt32: np.uint32,
gdal.GDT_Int32: np.int32,
gdal.GDT_Float32: np.float32,
gdal.GDT_Float64: np.float64,
gdal.GDT_CInt16: np.complex64,
gdal.GDT_CInt32: np.complex64,
gdal.GDT_CFloat32: np.complex64,
gdal.GDT_CFloat64: np.complex128
}
def find_granules(dire, tile, product, start_time, end_time):
"""Find MCD43 granules based on folder, tile and product type (A1
or A2). Returns a dictionary of datetimes of the products and
granules, or raises an IOError exception if not files found."""
times = []
fnames = []
path = Path(dire)
start_year = start_time.year
end_year = end_time.year
granules_start = path.rglob(f"**/MCD43{product:s}.A{start_year:4d}*.{tile:s}.*.hdf")
granules = [f for f in granules_start]
if end_year != start_year:
granules_end = path.rglob(f"**/MCD43{product:s}.A{end_year:4d}*.{tile:s}.*.hdf")
granules = granules + [f for f in granules_end]
granules = list(set(granules))
if len(granules) == 0:
raise IOError("Couldn't find any MCD43%s files in %s" % (product, dire))
for granule in granules:
fich = os.path.basename (granule)
timex = datetime.datetime.strptime(fich.split(".")[1][1:], "%Y%j")
if timex >= start_time and \
(end_time is None or timex <= end_time ):
times.append (timex)
fnames.append(granule.as_posix())
return dict(list(zip(times, fnames)))
def process_time_input(timestamp):
"""Processes a timestamp given either as (i) a string in
"%Y-%m-%d" format, (ii) a string in "%Y%j" format or
(iii) a datetime.datetime object. Returns a datetime.datetime
ojbect, and raises ValueError if none of the options fits."""
if type(timestamp) == datetime.datetime:
output_time = timestamp
elif type(timestamp) == str:
try:
output_time = datetime.datetime.strptime(timestamp,
"%Y-%m-%d")
except ValueError:
try:
output_time = datetime.datetime.strptime(timestamp,
"%Y%j")
except ValueError:
raise ValueError("The passed timestamp wasn't either " +
'a "%Y-%m-%d" string, a "%Y%j" string')
else:
raise ValueError("You can only use a string or a datetime object")
return output_time
def open_gdal_dataset(fname, roi=None):
g = gdal.Open(fname)
if g is None:
raise IOError("Can't open %s" % fname)
if roi is None:
data = g.ReadAsArray()
else:
ulx, uly, lrx, lry = roi
xoff = ulx
yoff = uly
xcount = lrx - ulx
ycount = lry - uly
data = g.ReadAsArray(xoff, yoff, xcount, ycount).astype(
GDAL2NUMPY[g.GetRasterBand(1).DataType])
return data
def process_masked_kernels(band_no, a1_granule, a2_granule,
band_transfer=None, roi=None):
if band_transfer is not None:
band_no = band_transfer[band_no]
fname_a1 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a1_granule)
fname_a2 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a2_granule)
try:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_Band%d' % (band_no)
except TypeError:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_%s' % (band_no)
fsnow = fname_a2 + 'Snow_BRDF_Albedo'
fland = fname_a2 + 'BRDF_Albedo_LandWaterType'
func = fname_a2 + 'BRDF_Albedo_Uncertainty' # % a2_granule
try:
fqa = fname_a2 + 'BRDF_Albedo_Band_Quality_Band%d' % band_no
except TypeError:
fqa = fname_a1 + 'BRDF_Albedo_Band_Mandatory_Quality_%s' % band_no
for fname in [fdata, fsnow, fland, fqa]:
data = open_gdal_dataset(fname, roi)
if fname.find("Albedo_Parameters") >= 0:
# Read kernels, post process
kernels = process_kernels(data)
elif fname.find("Snow") >= 0:
# Read snow mask... post process
snow = process_snow(data)
elif fname.find("XXXXXLandWaterType") >= 0:
shp = data.shape
land = np.in1d(data, [1, 3, 4, 5]) # data == 1 # Only land
land = land.reshape(shp)
# elif fname.find("BRDF_Albedo_Uncertainty") >= 0:
# unc = process_unc (data)
elif fname.find("BRDF_Albedo_Band_Quality") >= 0 or \
fname.find("BRDF_Albedo_Band_Mandatory_Quality") >= 0:
qa = np.where(data <= 1, True, False) # Best & good
qa_val = data*1
# Create mask:
# 1. Ignore snow
# 2. Only land
# 3. Only good and best
mask = snow * qa # *land * qa
qa_val = np.where(mask, qa_val, np.nan)
return kernels, mask, qa_val
def process_unc(unc):
"""Process uncertainty. Fuck know what it means..."""
unc = np.where(unc == 32767, np.nan, unc/1000.)
def process_snow(snow):
"""Returns True if snow free albedo retrieval"""
return np.where(snow == 0, True, False)
def process_kernels(kernels):
"""Scales the kernels, maybe does other things"""
kernels = np.where(kernels == 32767, np.nan, kernels/1000.)
return kernels
class RetrieveBRDFDescriptors(object):
| if os.path.exists(mcd43a1_dir):
self.mcd43a1_dir = mcd43a1_dir
else:
raise IOError("mcd43a1_dir does not exist!")
self.a1_granules = find_granules(self.mcd43a1_dir, tile, "A1",
self.start_time, self.end_time)
if mcd43a2_dir is None:
self.mcd43a2_dir = mcd43a1_dir
else:
if os.path.exists(mcd43a2_dir):
self.mcd43a2_dir = mcd43a2_dir
else:
raise | """Retrieving BRDF descriptors."""
def __init__(self, tile, mcd43a1_dir, start_time, end_time=None,
mcd43a2_dir=None, roi=None):
"""The class needs to locate the data granules. We assume that
these are available somewhere in the filesystem and that we can
index them by location (MODIS tile name e.g. "h19v10") and
time. The user can give a folder for the MCD43A1 and A2 granules,
and if the second is ignored, it will be assumed that they are
in the same folder. We also need a starting date (either a
datetime object, or a string in "%Y-%m-%d" or "%Y%j" format. If
the end time is not specified, it will be set to the date of the
latest granule found."""
self.tile = tile
self.start_time = process_time_input(start_time)
if end_time is not None:
self.end_time = process_time_input(end_time)
else:
self.end_time = None | identifier_body |
BRDF_descriptors.py | author__ = "J Gomez-Dans"
__copyright__ = "Copyright 2017, 2018 J Gomez-Dans"
__license__ = "GPLv3"
__email__ = "[email protected]"
GDAL2NUMPY = {gdal.GDT_Byte: np.uint8,
gdal.GDT_UInt16: np.uint16,
gdal.GDT_Int16: np.int16,
gdal.GDT_UInt32: np.uint32,
gdal.GDT_Int32: np.int32,
gdal.GDT_Float32: np.float32,
gdal.GDT_Float64: np.float64,
gdal.GDT_CInt16: np.complex64,
gdal.GDT_CInt32: np.complex64,
gdal.GDT_CFloat32: np.complex64,
gdal.GDT_CFloat64: np.complex128
}
def find_granules(dire, tile, product, start_time, end_time):
"""Find MCD43 granules based on folder, tile and product type (A1
or A2). Returns a dictionary of datetimes of the products and
granules, or raises an IOError exception if not files found."""
times = []
fnames = []
path = Path(dire)
start_year = start_time.year
end_year = end_time.year
granules_start = path.rglob(f"**/MCD43{product:s}.A{start_year:4d}*.{tile:s}.*.hdf")
granules = [f for f in granules_start]
if end_year != start_year:
granules_end = path.rglob(f"**/MCD43{product:s}.A{end_year:4d}*.{tile:s}.*.hdf")
granules = granules + [f for f in granules_end]
granules = list(set(granules))
if len(granules) == 0:
raise IOError("Couldn't find any MCD43%s files in %s" % (product, dire))
for granule in granules:
fich = os.path.basename (granule)
timex = datetime.datetime.strptime(fich.split(".")[1][1:], "%Y%j")
if timex >= start_time and \
(end_time is None or timex <= end_time ):
times.append (timex)
fnames.append(granule.as_posix())
return dict(list(zip(times, fnames)))
def process_time_input(timestamp):
"""Processes a timestamp given either as (i) a string in
"%Y-%m-%d" format, (ii) a string in "%Y%j" format or
(iii) a datetime.datetime object. Returns a datetime.datetime
ojbect, and raises ValueError if none of the options fits."""
if type(timestamp) == datetime.datetime:
output_time = timestamp
elif type(timestamp) == str:
|
else:
raise ValueError("You can only use a string or a datetime object")
return output_time
def open_gdal_dataset(fname, roi=None):
g = gdal.Open(fname)
if g is None:
raise IOError("Can't open %s" % fname)
if roi is None:
data = g.ReadAsArray()
else:
ulx, uly, lrx, lry = roi
xoff = ulx
yoff = uly
xcount = lrx - ulx
ycount = lry - uly
data = g.ReadAsArray(xoff, yoff, xcount, ycount).astype(
GDAL2NUMPY[g.GetRasterBand(1).DataType])
return data
def process_masked_kernels(band_no, a1_granule, a2_granule,
band_transfer=None, roi=None):
if band_transfer is not None:
band_no = band_transfer[band_no]
fname_a1 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a1_granule)
fname_a2 = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_BRDF:' % (a2_granule)
try:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_Band%d' % (band_no)
except TypeError:
fdata = fname_a1 + 'BRDF_Albedo_Parameters_%s' % (band_no)
fsnow = fname_a2 + 'Snow_BRDF_Albedo'
fland = fname_a2 + 'BRDF_Albedo_LandWaterType'
func = fname_a2 + 'BRDF_Albedo_Uncertainty' # % a2_granule
try:
fqa = fname_a2 + 'BRDF_Albedo_Band_Quality_Band%d' % band_no
except TypeError:
fqa = fname_a1 + 'BRDF_Albedo_Band_Mandatory_Quality_%s' % band_no
for fname in [fdata, fsnow, fland, fqa]:
data = open_gdal_dataset(fname, roi)
if fname.find("Albedo_Parameters") >= 0:
# Read kernels, post process
kernels = process_kernels(data)
elif fname.find("Snow") >= 0:
# Read snow mask... post process
snow = process_snow(data)
elif fname.find("XXXXXLandWaterType") >= 0:
shp = data.shape
land = np.in1d(data, [1, 3, 4, 5]) # data == 1 # Only land
land = land.reshape(shp)
# elif fname.find("BRDF_Albedo_Uncertainty") >= 0:
# unc = process_unc (data)
elif fname.find("BRDF_Albedo_Band_Quality") >= 0 or \
fname.find("BRDF_Albedo_Band_Mandatory_Quality") >= 0:
qa = np.where(data <= 1, True, False) # Best & good
qa_val = data*1
# Create mask:
# 1. Ignore snow
# 2. Only land
# 3. Only good and best
mask = snow * qa # *land * qa
qa_val = np.where(mask, qa_val, np.nan)
return kernels, mask, qa_val
def process_unc(unc):
"""Process uncertainty. Fuck know what it means..."""
unc = np.where(unc == 32767, np.nan, unc/1000.)
def process_snow(snow):
"""Returns True if snow free albedo retrieval"""
return np.where(snow == 0, True, False)
def process_kernels(kernels):
"""Scales the kernels, maybe does other things"""
kernels = np.where(kernels == 32767, np.nan, kernels/1000.)
return kernels
class RetrieveBRDFDescriptors(object):
"""Retrieving BRDF descriptors."""
def __init__(self, tile, mcd43a1_dir, start_time, end_time=None,
mcd43a2_dir=None, roi=None):
"""The class needs to locate the data granules. We assume that
these are available somewhere in the filesystem and that we can
index them by location (MODIS tile name e.g. "h19v10") and
time. The user can give a folder for the MCD43A1 and A2 granules,
and if the second is ignored, it will be assumed that they are
in the same folder. We also need a starting date (either a
datetime object, or a string in "%Y-%m-%d" or "%Y%j" format. If
the end time is not specified, it will be set to the date of the
latest granule found."""
self.tile = tile
self.start_time = process_time_input(start_time)
if end_time is not None:
self.end_time = process_time_input(end_time)
else:
self.end_time = None
if os.path.exists(mcd43a1_dir):
self.mcd43a1_dir = mcd43a1_dir
else:
raise IOError("mcd43a1_dir does not exist!")
self.a1_granules = find_granules(self.mcd43a1_dir, tile, "A1",
self.start_time, self.end_time)
if mcd43a2_dir is None:
self.mcd43a2_dir = mcd43a1_dir
else:
if os.path.exists(mcd43a2_dir):
self.mcd43a2_dir = mcd43a2_dir
else:
| try:
output_time = datetime.datetime.strptime(timestamp,
"%Y-%m-%d")
except ValueError:
try:
output_time = datetime.datetime.strptime(timestamp,
"%Y%j")
except ValueError:
raise ValueError("The passed timestamp wasn't either " +
'a "%Y-%m-%d" string, a "%Y%j" string') | conditional_block |
bidirectional.rs | fn render<F: FnMut(Progress<'_>)>(
film: &Film,
task_runner: TaskRunner,
mut on_status: F,
renderer: &Renderer,
config: &BidirParams,
world: &World,
camera: &Camera,
resources: &Resources,
) | index, rng, tile, film, camera, world, resources, renderer, config, progress,
);
},
|_, _| {
progress += 1;
on_status(Progress {
progress: ((progress * 100) / num_tiles) as u8,
message: &status_message,
});
},
);
}
fn render_tile<R: Rng>(
index: usize,
mut rng: R,
tile: Tile,
film: &Film,
camera: &Camera,
world: &World,
resources: &Resources,
renderer: &Renderer,
bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if let BounceType::Diffuse(_, _) = bounce.ty {
} else {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[Bounce<'a>],
world: &World,
use_additional: bool,
exe: &mut ExecutionContext<'a>,
) -> Vec<Sample> {
let mut contributions = vec![];
let bounce_brdf = match bounce.ty {
BounceType::Emission | BounceType::Specular => return contributions,
BounceType::Diffuse(brdf, _) => brdf,
};
for (i, lamp_bounce) in path.iter().enumerate() {
if let BounceType::Specular = lamp_bounce.ty {
continue;
}
let from = bounce.position;
let to = lamp_bounce.position;
let direction = to - from;
let sq_distance = direction.magnitude2();
let distance = sq_distance.sqrt();
let ray = Ray3::new(from, direction / distance);
if bounce.normal.dot | {
fn gen_rng() -> XorShiftRng {
XorShiftRng::from_rng(rand::thread_rng()).expect("could not generate RNG")
}
let tiles = make_tiles(film.width(), film.height(), renderer.tile_size, camera);
let status_message = "Rendering";
on_status(Progress {
progress: 0,
message: &status_message,
});
let mut progress: usize = 0;
let num_tiles = tiles.len();
task_runner.run_tasks(
tiles.into_iter().map(|f| (f, gen_rng())),
|index, (tile, rng), progress| {
render_tile( | identifier_body |
bidirectional.rs | bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if let BounceType::Diffuse(_, _) = bounce.ty {
} else {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[Bounce<'a>],
world: &World,
use_additional: bool,
exe: &mut ExecutionContext<'a>,
) -> Vec<Sample> {
let mut contributions = vec![];
let bounce_brdf = match bounce.ty {
BounceType::Emission | BounceType::Specular => return contributions,
BounceType::Diffuse(brdf, _) => brdf,
};
for (i, lamp_bounce) in path.iter().enumerate() {
if let BounceType::Specular = lamp_bounce.ty {
continue;
}
let from = bounce.position;
let to = lamp_bounce.position;
let direction = to - from;
let sq_distance = direction.magnitude2();
let distance = sq_distance.sqrt();
let ray = Ray3::new(from, direction / distance);
if bounce.normal.dot(ray.direction) <= 0.0 {
continue;
}
if lamp_bounce.normal.dot(-ray.direction) <= 0.0 {
continue;
}
let hit = world.intersect(ray).map(|hit| hit.distance);
if let Some(dist) = hit {
if dist < distance - DIST_EPSILON {
continue;
}
}
let cos_out = bounce.normal.dot(ray.direction).abs();
let cos_in = lamp_bounce.normal.dot(-ray.direction).abs();
let brdf_out = bounce_brdf(bounce.incident, bounce.normal, ray.direction)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
let scale = cos_in * cos_out * brdf_out / (2.0 * std::f32::consts::PI * sq_distance);
let brdf_in = lamp_bounce.ty.brdf(-ray.direction, lamp_bounce.normal)
/ lamp_bounce
.ty
.brdf(lamp_bounce.incident, lamp_bounce.normal);
let mut use_additional = use_additional;
let mut additional: Vec<_> = additional
.iter()
.cloned()
.map(|(s, r)| (s, r * scale))
.collect();
let mut main = main.clone();
main.1 *= scale;
for (i, bounce) in path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional | {
&mut *additional
} | conditional_block |
|
bidirectional.rs | fn render<F: FnMut(Progress<'_>)>(
film: &Film,
task_runner: TaskRunner,
mut on_status: F,
renderer: &Renderer,
config: &BidirParams,
world: &World,
camera: &Camera,
resources: &Resources,
) {
fn | () -> XorShiftRng {
XorShiftRng::from_rng(rand::thread_rng()).expect("could not generate RNG")
}
let tiles = make_tiles(film.width(), film.height(), renderer.tile_size, camera);
let status_message = "Rendering";
on_status(Progress {
progress: 0,
message: &status_message,
});
let mut progress: usize = 0;
let num_tiles = tiles.len();
task_runner.run_tasks(
tiles.into_iter().map(|f| (f, gen_rng())),
|index, (tile, rng), progress| {
render_tile(
index, rng, tile, film, camera, world, resources, renderer, config, progress,
);
},
|_, _| {
progress += 1;
on_status(Progress {
progress: ((progress * 100) / num_tiles) as u8,
message: &status_message,
});
},
);
}
fn render_tile<R: Rng>(
index: usize,
mut rng: R,
tile: Tile,
film: &Film,
camera: &Camera,
world: &World,
resources: &Resources,
renderer: &Renderer,
bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if let BounceType::Diffuse(_, _) = bounce.ty {
} else {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[Bounce<'a>],
world: &World,
use_additional: bool,
exe: &mut ExecutionContext<'a>,
) -> Vec<Sample> {
let mut contributions = vec![];
let bounce_brdf = match bounce.ty {
BounceType::Emission | BounceType::Specular => return contributions,
BounceType::Diffuse(brdf, _) => brdf,
};
for (i, lamp_bounce) in path.iter().enumerate() {
if let BounceType::Specular = lamp_bounce.ty {
continue;
}
let from = bounce.position;
let to = lamp_bounce.position;
let direction = to - from;
let sq_distance = direction.magnitude2();
let distance = sq_distance.sqrt();
let ray = Ray3::new(from, direction / distance);
if bounce.normal.dot | gen_rng | identifier_name |
bidirectional.rs | use cgmath::{EuclideanSpace, InnerSpace, Point2, Vector3};
use collision::Ray3;
use super::{
algorithm::{contribute, make_tiles, Tile},
LocalProgress, Progress, Renderer, TaskRunner,
};
use crate::cameras::Camera;
use crate::film::{Film, Sample};
use crate::lamp::{RaySample, Surface};
use crate::tracer::{trace, Bounce, BounceType};
use crate::utils::pairs;
use crate::{
materials::ProbabilityInput,
math::DIST_EPSILON,
program::{ExecutionContext, Resources},
world::World,
};
use std::{
cell::Cell,
time::{Duration, Instant},
};
pub struct BidirParams {
pub bounces: u32,
}
pub(crate) fn render<F: FnMut(Progress<'_>)>(
film: &Film,
task_runner: TaskRunner,
mut on_status: F,
renderer: &Renderer,
config: &BidirParams,
world: &World,
camera: &Camera,
resources: &Resources,
) {
fn gen_rng() -> XorShiftRng {
XorShiftRng::from_rng(rand::thread_rng()).expect("could not generate RNG")
}
let tiles = make_tiles(film.width(), film.height(), renderer.tile_size, camera);
let status_message = "Rendering";
on_status(Progress {
progress: 0,
message: &status_message,
});
let mut progress: usize = 0;
let num_tiles = tiles.len();
task_runner.run_tasks(
tiles.into_iter().map(|f| (f, gen_rng())),
|index, (tile, rng), progress| {
render_tile(
index, rng, tile, film, camera, world, resources, renderer, config, progress,
);
},
|_, _| {
progress += 1;
on_status(Progress {
progress: ((progress * 100) / num_tiles) as u8,
message: &status_message,
});
},
);
}
fn render_tile<R: Rng>(
index: usize,
mut rng: R,
tile: Tile,
film: &Film,
camera: &Camera,
world: &World,
resources: &Resources,
renderer: &Renderer,
bidir_params: &BidirParams,
progress: LocalProgress,
) {
let mut lamp_path = Vec::with_capacity(bidir_params.bounces as usize + 1);
let mut camera_path = Vec::with_capacity(renderer.bounces as usize);
let mut additional_samples = Vec::with_capacity(renderer.spectrum_samples as usize - 1);
let mut exe = ExecutionContext::new(resources);
let iterations = tile.area() as u64 * renderer.pixel_samples as u64;
let message = format!("Tile {}", index);
let mut last_progress = Instant::now();
progress.show(message, iterations);
for i in 0..iterations {
if Instant::now() - last_progress > Duration::from_millis(100) {
progress.set_progress(i);
last_progress = Instant::now();
}
lamp_path.clear();
camera_path.clear();
additional_samples.clear();
let position = tile.sample_point(&mut rng);
additional_samples.extend(
film.sample_many_wavelengths(&mut rng, renderer.spectrum_samples as usize)
.map(|wavelength| {
(
Sample {
wavelength,
brightness: 0.0,
weight: 1.0,
},
1.0,
)
}),
);
let mut main_sample =
additional_samples.swap_remove(rng.gen_range(0..additional_samples.len()));
let wavelength = main_sample.0.wavelength;
let camera_ray = camera.ray_towards(&position, &mut rng);
let lamp_sample = world
.pick_lamp(&mut rng)
.and_then(|(l, p)| l.sample_ray(&mut rng).map(|r| (r, p)));
if let Some((lamp_sample, probability)) = lamp_sample {
let RaySample {
mut ray,
surface,
weight,
} = lamp_sample;
let (color, material_probability, dispersed, normal, texture) = match surface {
Surface::Physical {
normal,
material,
texture,
} => {
let component = material.choose_emissive(&mut rng);
let input = ProbabilityInput {
wavelength,
wavelength_used: Cell::new(false),
normal,
incident: -ray.direction,
texture_coordinate: texture,
};
let probability = component.get_probability(&mut exe, &input);
(
component.bsdf.color,
probability,
input.wavelength_used.get(),
normal,
texture,
)
}
Surface::Color(color) => (color, 1.0, false, ray.direction, Point2::origin()),
};
ray.origin += normal * DIST_EPSILON;
lamp_path.push(Bounce {
ty: BounceType::Emission,
dispersed,
color,
incident: Vector3::new(0.0, 0.0, 0.0),
position: ray.origin,
normal,
texture,
probability: weight / (probability * material_probability),
direct_light: vec![],
});
trace(
&mut lamp_path,
&mut rng,
ray,
wavelength,
world,
bidir_params.bounces,
0,
&mut exe,
);
pairs(&mut lamp_path, |to, from| {
to.incident = -from.incident;
if let BounceType::Diffuse(_, ref mut o) = from.ty {
*o = from.incident
}
});
if lamp_path.len() > 1 {
if let Some(last) = lamp_path.pop() {
match last.ty {
BounceType::Diffuse(_, _) | BounceType::Specular => lamp_path.push(last),
BounceType::Emission => {}
}
}
}
lamp_path.reverse();
}
trace(
&mut camera_path,
&mut rng,
camera_ray,
wavelength,
world,
renderer.bounces,
renderer.light_samples,
&mut exe,
);
let total = (camera_path.len() * lamp_path.len()) as f32;
let weight = 1.0 / total;
let mut use_additional = true;
for bounce in &camera_path {
use_additional = !bounce.dispersed && use_additional;
let additional_samples_slice = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples_slice, &mut exe);
for mut contribution in connect_paths(
&bounce,
&main_sample,
&additional_samples,
&lamp_path,
world,
use_additional,
&mut exe,
) {
contribution.weight = weight;
film.expose(position, contribution);
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
let weight = 1.0 / lamp_path.len() as f32;
for (i, bounce) in lamp_path.iter().enumerate() {
if let BounceType::Diffuse(_, _) = bounce.ty {
} else {
continue;
}
let camera_hit = camera.is_visible(bounce.position, &world, &mut rng);
if let Some((position, ray)) = camera_hit {
if position.x > -1.0 && position.x < 1.0 && position.y > -1.0 && position.y < 1.0 {
let sq_distance = (ray.origin - bounce.position).magnitude2();
let scale = 1.0 / (sq_distance);
let brdf_in = bounce.ty.brdf(-ray.direction, bounce.normal)
/ bounce.ty.brdf(bounce.incident, bounce.normal);
main_sample.0.brightness = 0.0;
main_sample.0.weight = weight;
main_sample.1 = scale;
use_additional = true;
for &mut (ref mut sample, ref mut reflectance) in &mut additional_samples {
sample.brightness = 0.0;
sample.weight = weight;
*reflectance = scale;
}
for (i, bounce) in lamp_path[i..].iter().enumerate() {
use_additional = !bounce.dispersed && use_additional;
let additional_samples = if use_additional {
&mut *additional_samples
} else {
&mut []
};
contribute(bounce, &mut main_sample, additional_samples, &mut exe);
if i == 0 {
main_sample.1 *= brdf_in;
for (_, reflectance) in additional_samples {
*reflectance *= brdf_in;
}
}
}
film.expose(position, main_sample.0.clone());
if use_additional {
for &(ref sample, _) in &additional_samples {
film.expose(position, sample.clone());
}
}
}
}
}
}
}
fn connect_paths<'a>(
bounce: &Bounce<'a>,
main: &(Sample, f32),
additional: &[(Sample, f32)],
path: &[B | use rand::{self, Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
| random_line_split |
|
parser.go | imports []*ast.ImportSpec // list of imports
// Label scopes
// (maintained by open/close LabelScope)
labelScope *ast.Scope // label scope for current function
targetStack [][]*ast.Ident // stack of unresolved labels
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
switch p.tok {
case token.IDENT:
x := p.parseIdent()
// if !lhs {
// p.resolve(x)
// }
return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
return x
case token.LPAREN:
lparen := p.pos
p.next()
p.exprLev++
x := p.parseOperand(false)
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
default:
p.errorExpected(p.pos, "operand")
return &ast.BadExpr{From: p.pos - 10, To: p.pos + 10}
}
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
p.leadComment = nil
p.lineComment = nil
// prev := p.pos
p.pos, p.tok, p.lit = p.scanner.Scan()
}
func (p *parser) printTrace(a ...interface{}) {
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
pos := p.file.Position(p.pos)
fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *parser, msg string) *parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
p.indent--
p.printTrace(")")
}
func (p *parser) init(fset *token.FileSet, filename string, src []byte) {
p.file = fset.AddFile(filename, -1, len(src))
var m scanner.Mode
eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
p.scanner.Init(p.file, src, eh, m)
p.next()
}
// A bailout panic is raised to indicate early termination.
type bailout struct{}
func (p *parser) error(pos token.Pos, msg string) {
epos := p.file.Position(pos)
p.errors.Add(epos, msg)
}
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
// the error happened at the current position;
// make the error message more specific
if p.tok == token.SEMICOLON && p.lit == "\n" {
msg += ", found newline"
} else {
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
msg += " " + p.lit
}
}
}
p.error(pos, msg)
}
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
p.errorExpected(pos, "'"+tok.String()+"'")
}
p.next() // make progress
return pos
}
func assert(cond bool, msg string) {
if !cond {
panic("go/parser internal error: " + msg)
}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
// If x is an identifier, tryResolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers.
//
func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
// nothing to do if x is not an identifier or the blank identifier
ident, _ := x.(*ast.Ident)
if ident == nil {
return
}
assert(ident.Obj == nil, "identifier already declared or resolved")
if ident.Name == "_" {
return
}
// try to resolve the identifier
for s := p.topScope; s != nil; s = s.Outer {
if obj := s.Lookup(ident.Name); obj != nil {
ident.Obj = obj
return
}
}
// all local scopes are known, so any unresolved identifier
// must be found either in the file scope, package scope
// (perhaps in another file), or universe scope --- collect
// them so that they can be resolved later
if collectUnresolved {
ident.Obj = unresolved
p.unresolved = append(p.unresolved, ident)
}
}
func (p *parser) resolve(x ast.Expr) {
p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Identifiers
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
if p.tok == token.IDENT {
name = p.lit
p.next()
} else {
p.expect(token.IDENT) // use expect() error handling
}
return &ast.Ident{NamePos: pos, Name: name}
}
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
return []byte(s), nil
case []byte:
return s, nil
case *bytes.Buffer:
// is io.Reader, but src is already available in []byte form
if s != nil {
return s.Bytes(), nil
}
case io.Reader:
var buf bytes.Buffer
if _, err := io.Copy(&buf, s); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
return nil, errors.New("invalid source")
}
return ioutil.ReadFile(filename)
}
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality. Position information is recorded in the
// file set fset.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.Bad* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(fset *token.FileSet, filename string, src interface{}) *ast.File {
// get source
text, err := readSource(filename, src)
if err != nil {
return nil
}
var p parser
// parse source
p.init(fset, filename, text)
// Don't bother parsing the rest if we had errors scanning the first token.
// Likely not a Go source file at all.
if p.errors.Len() != 0 {
return nil
}
// xxx := p.parseOperand(false)
return &ast.File{
Name: &ast.Ident{Name: "p"},
Decls: []ast.Decl{
& |
// Ordinary identifier scopes
pkgScope *ast.Scope // pkgScope.Outer == nil
topScope *ast.Scope // top-most scope; may be pkgScope
unresolved []*ast.Ident // unresolved identifiers | random_line_split |
|
parser.go | imports []*ast.ImportSpec // list of imports
// Label scopes
// (maintained by open/close LabelScope)
labelScope *ast.Scope // label scope for current function
targetStack [][]*ast.Ident // stack of unresolved labels
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
switch p.tok {
case token.IDENT:
x := p.parseIdent()
// if !lhs {
// p.resolve(x)
// }
return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
return x
case token.LPAREN:
lparen := p.pos
p.next()
p.exprLev++
x := p.parseOperand(false)
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
default:
p.errorExpected(p.pos, "operand")
return &ast.BadExpr{From: p.pos - 10, To: p.pos + 10}
}
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
p.leadComment = nil
p.lineComment = nil
// prev := p.pos
p.pos, p.tok, p.lit = p.scanner.Scan()
}
func (p *parser) | (a ...interface{}) {
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
pos := p.file.Position(p.pos)
fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *parser, msg string) *parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
p.indent--
p.printTrace(")")
}
func (p *parser) init(fset *token.FileSet, filename string, src []byte) {
p.file = fset.AddFile(filename, -1, len(src))
var m scanner.Mode
eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
p.scanner.Init(p.file, src, eh, m)
p.next()
}
// A bailout panic is raised to indicate early termination.
type bailout struct{}
func (p *parser) error(pos token.Pos, msg string) {
epos := p.file.Position(pos)
p.errors.Add(epos, msg)
}
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
// the error happened at the current position;
// make the error message more specific
if p.tok == token.SEMICOLON && p.lit == "\n" {
msg += ", found newline"
} else {
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
msg += " " + p.lit
}
}
}
p.error(pos, msg)
}
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
p.errorExpected(pos, "'"+tok.String()+"'")
}
p.next() // make progress
return pos
}
func assert(cond bool, msg string) {
if !cond {
panic("go/parser internal error: " + msg)
}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
// If x is an identifier, tryResolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers.
//
func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
// nothing to do if x is not an identifier or the blank identifier
ident, _ := x.(*ast.Ident)
if ident == nil {
return
}
assert(ident.Obj == nil, "identifier already declared or resolved")
if ident.Name == "_" {
return
}
// try to resolve the identifier
for s := p.topScope; s != nil; s = s.Outer {
if obj := s.Lookup(ident.Name); obj != nil {
ident.Obj = obj
return
}
}
// all local scopes are known, so any unresolved identifier
// must be found either in the file scope, package scope
// (perhaps in another file), or universe scope --- collect
// them so that they can be resolved later
if collectUnresolved {
ident.Obj = unresolved
p.unresolved = append(p.unresolved, ident)
}
}
func (p *parser) resolve(x ast.Expr) {
p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Identifiers
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
if p.tok == token.IDENT {
name = p.lit
p.next()
} else {
p.expect(token.IDENT) // use expect() error handling
}
return &ast.Ident{NamePos: pos, Name: name}
}
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
return []byte(s), nil
case []byte:
return s, nil
case *bytes.Buffer:
// is io.Reader, but src is already available in []byte form
if s != nil {
return s.Bytes(), nil
}
case io.Reader:
var buf bytes.Buffer
if _, err := io.Copy(&buf, s); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
return nil, errors.New("invalid source")
}
return ioutil.ReadFile(filename)
}
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality. Position information is recorded in the
// file set fset.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.Bad* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(fset *token.FileSet, filename string, src interface{}) *ast.File {
// get source
text, err := readSource(filename, src)
if err != nil {
return nil
}
var p parser
// parse source
p.init(fset, filename, text)
// Don't bother parsing the rest if we had errors scanning the first token.
// Likely not a Go source file at all.
if p.errors.Len() != 0 {
return nil
}
// xxx := p.parseOperand(false)
return &ast.File{
Name: &ast.Ident{Name: "p"},
Decls: []ast.Decl{
&ast.FuncDecl{
Name: &ast.Ident{Name: "f"},
Type: &ast.FuncType{},
},
},
}
}
func StringsToArray(arr0 []string) []interface{} {
res := make([] | printTrace | identifier_name |
parser.go | imports []*ast.ImportSpec // list of imports
// Label scopes
// (maintained by open/close LabelScope)
labelScope *ast.Scope // label scope for current function
targetStack [][]*ast.Ident // stack of unresolved labels
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
switch p.tok {
case token.IDENT:
x := p.parseIdent()
// if !lhs {
// p.resolve(x)
// }
return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
return x
case token.LPAREN:
lparen := p.pos
p.next()
p.exprLev++
x := p.parseOperand(false)
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
default:
p.errorExpected(p.pos, "operand")
return &ast.BadExpr{From: p.pos - 10, To: p.pos + 10}
}
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
p.leadComment = nil
p.lineComment = nil
// prev := p.pos
p.pos, p.tok, p.lit = p.scanner.Scan()
}
func (p *parser) printTrace(a ...interface{}) {
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
pos := p.file.Position(p.pos)
fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *parser, msg string) *parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
p.indent--
p.printTrace(")")
}
func (p *parser) init(fset *token.FileSet, filename string, src []byte) {
p.file = fset.AddFile(filename, -1, len(src))
var m scanner.Mode
eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
p.scanner.Init(p.file, src, eh, m)
p.next()
}
// A bailout panic is raised to indicate early termination.
type bailout struct{}
func (p *parser) error(pos token.Pos, msg string) {
epos := p.file.Position(pos)
p.errors.Add(epos, msg)
}
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
// the error happened at the current position;
// make the error message more specific
if p.tok == token.SEMICOLON && p.lit == "\n" {
msg += ", found newline"
} else {
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
msg += " " + p.lit
}
}
}
p.error(pos, msg)
}
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
p.errorExpected(pos, "'"+tok.String()+"'")
}
p.next() // make progress
return pos
}
func assert(cond bool, msg string) {
if !cond {
panic("go/parser internal error: " + msg)
}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
// If x is an identifier, tryResolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers.
//
func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
// nothing to do if x is not an identifier or the blank identifier
ident, _ := x.(*ast.Ident)
if ident == nil |
assert(ident.Obj == nil, "identifier already declared or resolved")
if ident.Name == "_" {
return
}
// try to resolve the identifier
for s := p.topScope; s != nil; s = s.Outer {
if obj := s.Lookup(ident.Name); obj != nil {
ident.Obj = obj
return
}
}
// all local scopes are known, so any unresolved identifier
// must be found either in the file scope, package scope
// (perhaps in another file), or universe scope --- collect
// them so that they can be resolved later
if collectUnresolved {
ident.Obj = unresolved
p.unresolved = append(p.unresolved, ident)
}
}
func (p *parser) resolve(x ast.Expr) {
p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Identifiers
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
if p.tok == token.IDENT {
name = p.lit
p.next()
} else {
p.expect(token.IDENT) // use expect() error handling
}
return &ast.Ident{NamePos: pos, Name: name}
}
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
return []byte(s), nil
case []byte:
return s, nil
case *bytes.Buffer:
// is io.Reader, but src is already available in []byte form
if s != nil {
return s.Bytes(), nil
}
case io.Reader:
var buf bytes.Buffer
if _, err := io.Copy(&buf, s); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
return nil, errors.New("invalid source")
}
return ioutil.ReadFile(filename)
}
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality. Position information is recorded in the
// file set fset.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.Bad* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(fset *token.FileSet, filename string, src interface{}) *ast.File {
// get source
text, err := readSource(filename, src)
if err != nil {
return nil
}
var p parser
// parse source
p.init(fset, filename, text)
// Don't bother parsing the rest if we had errors scanning the first token.
// Likely not a Go source file at all.
if p.errors.Len() != 0 {
return nil
}
// xxx := p.parseOperand(false)
return &ast.File{
Name: &ast.Ident{Name: "p"},
Decls: []ast.Decl{
&ast.FuncDecl{
Name: &ast.Ident{Name: "f"},
Type: &ast.FuncType{},
},
},
}
}
func StringsToArray(arr0 []string) []interface{} {
res := make | {
return
} | conditional_block |
parser.go | imports []*ast.ImportSpec // list of imports
// Label scopes
// (maintained by open/close LabelScope)
labelScope *ast.Scope // label scope for current function
targetStack [][]*ast.Ident // stack of unresolved labels
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
switch p.tok {
case token.IDENT:
x := p.parseIdent()
// if !lhs {
// p.resolve(x)
// }
return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
return x
case token.LPAREN:
lparen := p.pos
p.next()
p.exprLev++
x := p.parseOperand(false)
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
default:
p.errorExpected(p.pos, "operand")
return &ast.BadExpr{From: p.pos - 10, To: p.pos + 10}
}
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
p.leadComment = nil
p.lineComment = nil
// prev := p.pos
p.pos, p.tok, p.lit = p.scanner.Scan()
}
func (p *parser) printTrace(a ...interface{}) |
func trace(p *parser, msg string) *parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
p.indent--
p.printTrace(")")
}
func (p *parser) init(fset *token.FileSet, filename string, src []byte) {
p.file = fset.AddFile(filename, -1, len(src))
var m scanner.Mode
eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
p.scanner.Init(p.file, src, eh, m)
p.next()
}
// A bailout panic is raised to indicate early termination.
type bailout struct{}
func (p *parser) error(pos token.Pos, msg string) {
epos := p.file.Position(pos)
p.errors.Add(epos, msg)
}
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
// the error happened at the current position;
// make the error message more specific
if p.tok == token.SEMICOLON && p.lit == "\n" {
msg += ", found newline"
} else {
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
msg += " " + p.lit
}
}
}
p.error(pos, msg)
}
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
p.errorExpected(pos, "'"+tok.String()+"'")
}
p.next() // make progress
return pos
}
func assert(cond bool, msg string) {
if !cond {
panic("go/parser internal error: " + msg)
}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
// If x is an identifier, tryResolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers.
//
func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
// nothing to do if x is not an identifier or the blank identifier
ident, _ := x.(*ast.Ident)
if ident == nil {
return
}
assert(ident.Obj == nil, "identifier already declared or resolved")
if ident.Name == "_" {
return
}
// try to resolve the identifier
for s := p.topScope; s != nil; s = s.Outer {
if obj := s.Lookup(ident.Name); obj != nil {
ident.Obj = obj
return
}
}
// all local scopes are known, so any unresolved identifier
// must be found either in the file scope, package scope
// (perhaps in another file), or universe scope --- collect
// them so that they can be resolved later
if collectUnresolved {
ident.Obj = unresolved
p.unresolved = append(p.unresolved, ident)
}
}
func (p *parser) resolve(x ast.Expr) {
p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Identifiers
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
if p.tok == token.IDENT {
name = p.lit
p.next()
} else {
p.expect(token.IDENT) // use expect() error handling
}
return &ast.Ident{NamePos: pos, Name: name}
}
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
return []byte(s), nil
case []byte:
return s, nil
case *bytes.Buffer:
// is io.Reader, but src is already available in []byte form
if s != nil {
return s.Bytes(), nil
}
case io.Reader:
var buf bytes.Buffer
if _, err := io.Copy(&buf, s); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
return nil, errors.New("invalid source")
}
return ioutil.ReadFile(filename)
}
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality. Position information is recorded in the
// file set fset.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.Bad* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(fset *token.FileSet, filename string, src interface{}) *ast.File {
// get source
text, err := readSource(filename, src)
if err != nil {
return nil
}
var p parser
// parse source
p.init(fset, filename, text)
// Don't bother parsing the rest if we had errors scanning the first token.
// Likely not a Go source file at all.
if p.errors.Len() != 0 {
return nil
}
// xxx := p.parseOperand(false)
return &ast.File{
Name: &ast.Ident{Name: "p"},
Decls: []ast.Decl{
&ast.FuncDecl{
Name: &ast.Ident{Name: "f"},
Type: &ast.FuncType{},
},
},
}
}
func StringsToArray(arr0 []string) []interface{} {
res := make | {
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
pos := p.file.Position(p.pos)
fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
} | identifier_body |
bindings.go | // switch e := event.(type) {
// case KeyEvent:
// InfoMapKey(e, v)
// case KeySequenceEvent:
// InfoMapKey(e, v)
// case MouseEvent:
// InfoMapMouse(e, v)
// case RawEvent:
// InfoMapKey(e, v)
// }
}
var r = regexp.MustCompile("<(.+?)>")
func findEvents(k string) (b KeySequenceEvent, ok bool, err error) {
var events []Event = nil
for len(k) > 0 {
groups := r.FindStringSubmatchIndex(k)
if len(groups) > 3 {
if events == nil {
events = make([]Event, 0, 3)
}
e, ok := findSingleEvent(k[groups[2]:groups[3]])
if !ok {
return KeySequenceEvent{}, false, errors.New("Invalid event " + k[groups[2]:groups[3]])
}
events = append(events, e)
k = k[groups[3]+1:]
} else {
return KeySequenceEvent{}, false, nil
}
}
return KeySequenceEvent{events}, true, nil
}
// findSingleEvent will find binding Key 'b' using string 'k'
func | (k string) (b Event, ok bool) {
modifiers := tcell.ModNone
// First, we'll strip off all the modifiers in the name and add them to the
// ModMask
modSearch:
for {
switch {
case strings.HasPrefix(k, "-") && k != "-":
// We optionally support dashes between modifiers
k = k[1:]
case strings.HasPrefix(k, "Ctrl") && k != "CtrlH":
// CtrlH technically does not have a 'Ctrl' modifier because it is really backspace
k = k[4:]
modifiers |= tcell.ModCtrl
case strings.HasPrefix(k, "Alt"):
k = k[3:]
modifiers |= tcell.ModAlt
case strings.HasPrefix(k, "Shift"):
k = k[5:]
modifiers |= tcell.ModShift
case strings.HasPrefix(k, "\x1b"):
screen.Screen.RegisterRawSeq(k)
return RawEvent{
esc: k,
}, true
default:
break modSearch
}
}
if k == "" {
return KeyEvent{}, false
}
// Control is handled in a special way, since the terminal sends explicitly
// marked escape sequences for control keys
// We should check for Control keys first
if modifiers&tcell.ModCtrl != 0 {
// see if the key is in bindingKeys with the Ctrl prefix.
k = string(unicode.ToUpper(rune(k[0]))) + k[1:]
if code, ok := keyEvents["Ctrl"+k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
// It is, we're done.
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
}
// See if we can find the key in bindingKeys
if code, ok := keyEvents[k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
// See if we can find the key in bindingMouse
if code, ok := mouseEvents[k]; ok {
return MouseEvent{
btn: code,
mod: modifiers,
}, true
}
// If we were given one character, then we've got a rune.
if len(k) == 1 {
return KeyEvent{
code: tcell.KeyRune,
mod: modifiers,
r: rune(k[0]),
}, true
}
// We don't know what happened.
return KeyEvent{}, false
}
func findEvent(k string) (Event, error) {
var event Event
event, ok, err := findEvents(k)
if err != nil {
return nil, err
}
if !ok {
event, ok = findSingleEvent(k)
if !ok {
return nil, errors.New(k + " is not a bindable event")
}
}
return event, nil
}
// TryBindKey tries to bind a key by writing to config.ConfigDir/bindings.json
// Returns true if the keybinding already existed and a possible error
func TryBindKey(k, v string, overwrite bool) (bool, error) {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return false, errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return false, errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return false, err
}
found := false
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
if overwrite {
parsed[ev] = v
}
found = true
break
}
}
}
if found && !overwrite {
return true, nil
} else if !found {
parsed[k] = v
}
BindKey(k, v, Binder["buffer"])
txt, _ := json.MarshalIndent(parsed, "", " ")
return true, ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return false, e
}
// UnbindKey removes the binding for a key from the bindings.json file
func UnbindKey(k string) error {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return err
}
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
delete(parsed, ev)
break
}
}
}
defaults := DefaultBindings("buffer")
if a, ok := defaults[k]; ok {
BindKey(k, a, Binder["buffer"])
} else if _, ok := config.Bindings["buffer"][k]; ok {
BufUnmap(key)
delete(config.Bindings["buffer"], k)
}
txt, _ := json.MarshalIndent(parsed, "", " ")
return ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return e
}
var mouseEvents = map[string]tcell.ButtonMask{
"MouseLeft": tcell.ButtonPrimary,
"MouseMiddle": tcell.ButtonMiddle,
"MouseRight": tcell.ButtonSecondary,
"MouseWheelUp": tcell.WheelUp,
"MouseWheelDown": tcell.WheelDown,
"MouseWheelLeft": tcell.WheelLeft,
"MouseWheelRight": tcell.WheelRight,
}
var keyEvents = map[string]tcell.Key{
"Up": tcell.KeyUp,
"Down": tcell.KeyDown,
"Right": tcell.KeyRight,
"Left": tcell.KeyLeft,
"UpLeft": tcell.KeyUpLeft,
"UpRight": tcell.KeyUpRight,
"DownLeft": tcell.KeyDownLeft,
"DownRight": tcell.KeyDownRight,
"Center": tcell.KeyCenter,
"PageUp": tcell.KeyPgUp,
"PageDown": tcell.KeyPgDn,
"Home": tcell.KeyHome,
"End": tcell.KeyEnd,
"Insert": tcell.KeyInsert,
"Delete": tcell.KeyDelete,
"Help": tcell.KeyHelp,
"Exit": tcell.KeyExit,
"Clear": tcell.KeyClear,
"Cancel": tcell.KeyCancel,
"Print": tcell.KeyPrint,
"Pause": tcell.KeyPause,
"Backtab": tcell.KeyBacktab,
"F1": tcell.KeyF1,
"F2": tcell.KeyF2 | findSingleEvent | identifier_name |
bindings.go |
for k, v := range parsed {
switch val := v.(type) {
case string:
BindKey(k, val, Binder["buffer"])
case map[string]interface{}:
bind, ok := Binder[k]
if !ok || bind == nil {
screen.TermMessage(fmt.Sprintf("%s is not a valid pane type", k))
continue
}
for e, a := range val {
s, ok := a.(string)
if !ok {
screen.TermMessage("Error reading bindings.json: non-string and non-map entry", k)
} else {
BindKey(e, s, bind)
}
}
default:
screen.TermMessage("Error reading bindings.json: non-string and non-map entry", k)
}
}
}
func BindKey(k, v string, bind func(e Event, a string)) {
event, err := findEvent(k)
if err != nil {
screen.TermMessage(err)
return
}
bind(event, v)
// switch e := event.(type) {
// case KeyEvent:
// InfoMapKey(e, v)
// case KeySequenceEvent:
// InfoMapKey(e, v)
// case MouseEvent:
// InfoMapMouse(e, v)
// case RawEvent:
// InfoMapKey(e, v)
// }
}
var r = regexp.MustCompile("<(.+?)>")
func findEvents(k string) (b KeySequenceEvent, ok bool, err error) {
var events []Event = nil
for len(k) > 0 {
groups := r.FindStringSubmatchIndex(k)
if len(groups) > 3 {
if events == nil {
events = make([]Event, 0, 3)
}
e, ok := findSingleEvent(k[groups[2]:groups[3]])
if !ok {
return KeySequenceEvent{}, false, errors.New("Invalid event " + k[groups[2]:groups[3]])
}
events = append(events, e)
k = k[groups[3]+1:]
} else {
return KeySequenceEvent{}, false, nil
}
}
return KeySequenceEvent{events}, true, nil
}
// findSingleEvent will find binding Key 'b' using string 'k'
func findSingleEvent(k string) (b Event, ok bool) {
modifiers := tcell.ModNone
// First, we'll strip off all the modifiers in the name and add them to the
// ModMask
modSearch:
for {
switch {
case strings.HasPrefix(k, "-") && k != "-":
// We optionally support dashes between modifiers
k = k[1:]
case strings.HasPrefix(k, "Ctrl") && k != "CtrlH":
// CtrlH technically does not have a 'Ctrl' modifier because it is really backspace
k = k[4:]
modifiers |= tcell.ModCtrl
case strings.HasPrefix(k, "Alt"):
k = k[3:]
modifiers |= tcell.ModAlt
case strings.HasPrefix(k, "Shift"):
k = k[5:]
modifiers |= tcell.ModShift
case strings.HasPrefix(k, "\x1b"):
screen.Screen.RegisterRawSeq(k)
return RawEvent{
esc: k,
}, true
default:
break modSearch
}
}
if k == "" {
return KeyEvent{}, false
}
// Control is handled in a special way, since the terminal sends explicitly
// marked escape sequences for control keys
// We should check for Control keys first
if modifiers&tcell.ModCtrl != 0 {
// see if the key is in bindingKeys with the Ctrl prefix.
k = string(unicode.ToUpper(rune(k[0]))) + k[1:]
if code, ok := keyEvents["Ctrl"+k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
// It is, we're done.
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
}
// See if we can find the key in bindingKeys
if code, ok := keyEvents[k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
// See if we can find the key in bindingMouse
if code, ok := mouseEvents[k]; ok {
return MouseEvent{
btn: code,
mod: modifiers,
}, true
}
// If we were given one character, then we've got a rune.
if len(k) == 1 {
return KeyEvent{
code: tcell.KeyRune,
mod: modifiers,
r: rune(k[0]),
}, true
}
// We don't know what happened.
return KeyEvent{}, false
}
func findEvent(k string) (Event, error) {
var event Event
event, ok, err := findEvents(k)
if err != nil {
return nil, err
}
if !ok {
event, ok = findSingleEvent(k)
if !ok {
return nil, errors.New(k + " is not a bindable event")
}
}
return event, nil
}
// TryBindKey tries to bind a key by writing to config.ConfigDir/bindings.json
// Returns true if the keybinding already existed and a possible error
func TryBindKey(k, v string, overwrite bool) (bool, error) {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return false, errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return false, errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return false, err
}
found := false
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
if overwrite {
parsed[ev] = v
}
found = true
break
}
}
}
if found && !overwrite {
return true, nil
} else if !found {
parsed[k] = v
}
BindKey(k, v, Binder["buffer"])
txt, _ := json.MarshalIndent(parsed, "", " ")
return true, ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return false, e
}
// UnbindKey removes the binding for a key from the bindings.json file
func UnbindKey(k string) error {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return err
}
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
delete(parsed, ev)
break
}
}
}
defaults := DefaultBindings("buffer")
if a, ok := defaults[k]; ok {
BindKey(k, a, Binder["buffer"])
} else if _, ok := config.Bindings["buffer"][k]; ok {
BufUnmap(key)
delete(config.Bindings["buffer"], k)
}
txt, _ := json.MarshalIndent(parsed, "", " ")
return ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return e
}
var mouseEvents = map[string]tcell.ButtonMask{
"MouseLeft": tcell.ButtonPrimary,
"MouseMiddle": tcell.ButtonMiddle,
"MouseRight": tcell.ButtonSecondary,
"MouseWheelUp": tcell.WheelUp,
"MouseWheelDown": tcell.WheelDown,
"MouseWheelLeft": tcell.WheelLeft,
"MouseWheelRight": tcell.WheelRight,
}
var keyEvents = map[string]tcell.Key{
"Up": | {
defaults := DefaultBindings(p)
for k, v := range defaults {
BindKey(k, v, bind)
}
} | conditional_block |
|
bindings.go | // switch e := event.(type) {
// case KeyEvent:
// InfoMapKey(e, v)
// case KeySequenceEvent:
// InfoMapKey(e, v)
// case MouseEvent:
// InfoMapMouse(e, v)
// case RawEvent:
// InfoMapKey(e, v)
// }
}
var r = regexp.MustCompile("<(.+?)>")
func findEvents(k string) (b KeySequenceEvent, ok bool, err error) {
var events []Event = nil
for len(k) > 0 {
groups := r.FindStringSubmatchIndex(k)
if len(groups) > 3 {
if events == nil {
events = make([]Event, 0, 3)
}
e, ok := findSingleEvent(k[groups[2]:groups[3]])
if !ok {
return KeySequenceEvent{}, false, errors.New("Invalid event " + k[groups[2]:groups[3]])
}
events = append(events, e)
k = k[groups[3]+1:]
} else {
return KeySequenceEvent{}, false, nil
}
}
return KeySequenceEvent{events}, true, nil
}
// findSingleEvent will find binding Key 'b' using string 'k'
func findSingleEvent(k string) (b Event, ok bool) {
modifiers := tcell.ModNone
// First, we'll strip off all the modifiers in the name and add them to the
// ModMask
modSearch:
for {
switch {
case strings.HasPrefix(k, "-") && k != "-":
// We optionally support dashes between modifiers
k = k[1:]
case strings.HasPrefix(k, "Ctrl") && k != "CtrlH":
// CtrlH technically does not have a 'Ctrl' modifier because it is really backspace
k = k[4:]
modifiers |= tcell.ModCtrl
case strings.HasPrefix(k, "Alt"):
k = k[3:]
modifiers |= tcell.ModAlt
case strings.HasPrefix(k, "Shift"):
k = k[5:]
modifiers |= tcell.ModShift
case strings.HasPrefix(k, "\x1b"):
screen.Screen.RegisterRawSeq(k)
return RawEvent{
esc: k,
}, true
default:
break modSearch
}
}
if k == "" {
return KeyEvent{}, false
}
// Control is handled in a special way, since the terminal sends explicitly
// marked escape sequences for control keys
// We should check for Control keys first
if modifiers&tcell.ModCtrl != 0 {
// see if the key is in bindingKeys with the Ctrl prefix.
k = string(unicode.ToUpper(rune(k[0]))) + k[1:]
if code, ok := keyEvents["Ctrl"+k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
// It is, we're done.
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
}
// See if we can find the key in bindingKeys
if code, ok := keyEvents[k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
// See if we can find the key in bindingMouse
if code, ok := mouseEvents[k]; ok {
return MouseEvent{
btn: code,
mod: modifiers,
}, true
}
// If we were given one character, then we've got a rune.
if len(k) == 1 {
return KeyEvent{
code: tcell.KeyRune,
mod: modifiers,
r: rune(k[0]),
}, true
}
// We don't know what happened.
return KeyEvent{}, false
}
func findEvent(k string) (Event, error) {
var event Event
event, ok, err := findEvents(k)
if err != nil {
return nil, err
}
if !ok {
event, ok = findSingleEvent(k)
if !ok {
return nil, errors.New(k + " is not a bindable event")
}
}
return event, nil
}
// TryBindKey tries to bind a key by writing to config.ConfigDir/bindings.json
// Returns true if the keybinding already existed and a possible error
func TryBindKey(k, v string, overwrite bool) (bool, error) {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return false, errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return false, errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return false, err
}
found := false
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
if overwrite {
parsed[ev] = v
}
found = true
break
}
}
}
if found && !overwrite {
return true, nil
} else if !found {
parsed[k] = v
}
BindKey(k, v, Binder["buffer"])
txt, _ := json.MarshalIndent(parsed, "", " ")
return true, ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return false, e
}
// UnbindKey removes the binding for a key from the bindings.json file
func UnbindKey(k string) error {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return err
}
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
delete(parsed, ev)
break
}
}
}
defaults := DefaultBindings("buffer")
if a, ok := defaults[k]; ok {
BindKey(k, a, Binder["buffer"])
} else if _, ok := config.Bindings["buffer"][k]; ok {
BufUnmap(key)
delete(config.Bindings["buffer"], k)
}
txt, _ := json.MarshalIndent(parsed, "", " ")
return ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return e
}
var mouseEvents = map[string]tcell.ButtonMask{
"MouseLeft": tcell.ButtonPrimary,
"MouseMiddle": tcell.ButtonMiddle,
"MouseRight": tcell.ButtonSecondary,
"MouseWheelUp": tcell.WheelUp,
"MouseWheelDown": tcell.WheelDown,
"MouseWheelLeft": tcell.WheelLeft,
"MouseWheelRight": tcell.WheelRight,
}
var keyEvents = map[string]tcell.Key{
"Up": tcell.KeyUp,
"Down": tcell.KeyDown,
"Right": tcell.KeyRight,
"Left": tcell.KeyLeft,
"UpLeft": tcell.KeyUpLeft,
"UpRight": tcell.KeyUpRight,
"DownLeft": tcell.KeyDownLeft,
"DownRight": tcell.KeyDownRight,
"Center": tcell.KeyCenter,
"PageUp": tcell.KeyPgUp,
"PageDown": tcell.KeyPgDn,
"Home": tcell.KeyHome,
"End": tcell.KeyEnd,
"Insert": tcell.KeyInsert,
"Delete": tcell.KeyDelete,
"Help": tcell.KeyHelp,
"Exit": tcell.KeyExit,
"Clear": tcell.KeyClear,
"Cancel": tcell.KeyCancel,
"Print": tcell.KeyPrint, | "F2": tcell.KeyF2,
| "Pause": tcell.KeyPause,
"Backtab": tcell.KeyBacktab,
"F1": tcell.KeyF1, | random_line_split |
bindings.go | // switch e := event.(type) {
// case KeyEvent:
// InfoMapKey(e, v)
// case KeySequenceEvent:
// InfoMapKey(e, v)
// case MouseEvent:
// InfoMapMouse(e, v)
// case RawEvent:
// InfoMapKey(e, v)
// }
}
var r = regexp.MustCompile("<(.+?)>")
func findEvents(k string) (b KeySequenceEvent, ok bool, err error) {
var events []Event = nil
for len(k) > 0 {
groups := r.FindStringSubmatchIndex(k)
if len(groups) > 3 {
if events == nil {
events = make([]Event, 0, 3)
}
e, ok := findSingleEvent(k[groups[2]:groups[3]])
if !ok {
return KeySequenceEvent{}, false, errors.New("Invalid event " + k[groups[2]:groups[3]])
}
events = append(events, e)
k = k[groups[3]+1:]
} else {
return KeySequenceEvent{}, false, nil
}
}
return KeySequenceEvent{events}, true, nil
}
// findSingleEvent will find binding Key 'b' using string 'k'
func findSingleEvent(k string) (b Event, ok bool) | modifiers |= tcell.ModShift
case strings.HasPrefix(k, "\x1b"):
screen.Screen.RegisterRawSeq(k)
return RawEvent{
esc: k,
}, true
default:
break modSearch
}
}
if k == "" {
return KeyEvent{}, false
}
// Control is handled in a special way, since the terminal sends explicitly
// marked escape sequences for control keys
// We should check for Control keys first
if modifiers&tcell.ModCtrl != 0 {
// see if the key is in bindingKeys with the Ctrl prefix.
k = string(unicode.ToUpper(rune(k[0]))) + k[1:]
if code, ok := keyEvents["Ctrl"+k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
// It is, we're done.
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
}
// See if we can find the key in bindingKeys
if code, ok := keyEvents[k]; ok {
var r tcell.Key
// Special case for escape, for some reason tcell doesn't send it with the esc character
if code < 256 && code != 27 {
r = code
}
return KeyEvent{
code: code,
mod: modifiers,
r: rune(r),
}, true
}
// See if we can find the key in bindingMouse
if code, ok := mouseEvents[k]; ok {
return MouseEvent{
btn: code,
mod: modifiers,
}, true
}
// If we were given one character, then we've got a rune.
if len(k) == 1 {
return KeyEvent{
code: tcell.KeyRune,
mod: modifiers,
r: rune(k[0]),
}, true
}
// We don't know what happened.
return KeyEvent{}, false
}
func findEvent(k string) (Event, error) {
var event Event
event, ok, err := findEvents(k)
if err != nil {
return nil, err
}
if !ok {
event, ok = findSingleEvent(k)
if !ok {
return nil, errors.New(k + " is not a bindable event")
}
}
return event, nil
}
// TryBindKey tries to bind a key by writing to config.ConfigDir/bindings.json
// Returns true if the keybinding already existed and a possible error
func TryBindKey(k, v string, overwrite bool) (bool, error) {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return false, errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return false, errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return false, err
}
found := false
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
if overwrite {
parsed[ev] = v
}
found = true
break
}
}
}
if found && !overwrite {
return true, nil
} else if !found {
parsed[k] = v
}
BindKey(k, v, Binder["buffer"])
txt, _ := json.MarshalIndent(parsed, "", " ")
return true, ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return false, e
}
// UnbindKey removes the binding for a key from the bindings.json file
func UnbindKey(k string) error {
var e error
var parsed map[string]interface{}
filename := filepath.Join(config.ConfigDir, "bindings.json")
createBindingsIfNotExist(filename)
if _, e = os.Stat(filename); e == nil {
input, err := ioutil.ReadFile(filename)
if err != nil {
return errors.New("Error reading bindings.json file: " + err.Error())
}
err = json5.Unmarshal(input, &parsed)
if err != nil {
return errors.New("Error reading bindings.json: " + err.Error())
}
key, err := findEvent(k)
if err != nil {
return err
}
for ev := range parsed {
if e, err := findEvent(ev); err == nil {
if e == key {
delete(parsed, ev)
break
}
}
}
defaults := DefaultBindings("buffer")
if a, ok := defaults[k]; ok {
BindKey(k, a, Binder["buffer"])
} else if _, ok := config.Bindings["buffer"][k]; ok {
BufUnmap(key)
delete(config.Bindings["buffer"], k)
}
txt, _ := json.MarshalIndent(parsed, "", " ")
return ioutil.WriteFile(filename, append(txt, '\n'), 0644)
}
return e
}
var mouseEvents = map[string]tcell.ButtonMask{
"MouseLeft": tcell.ButtonPrimary,
"MouseMiddle": tcell.ButtonMiddle,
"MouseRight": tcell.ButtonSecondary,
"MouseWheelUp": tcell.WheelUp,
"MouseWheelDown": tcell.WheelDown,
"MouseWheelLeft": tcell.WheelLeft,
"MouseWheelRight": tcell.WheelRight,
}
var keyEvents = map[string]tcell.Key{
"Up": tcell.KeyUp,
"Down": tcell.KeyDown,
"Right": tcell.KeyRight,
"Left": tcell.KeyLeft,
"UpLeft": tcell.KeyUpLeft,
"UpRight": tcell.KeyUpRight,
"DownLeft": tcell.KeyDownLeft,
"DownRight": tcell.KeyDownRight,
"Center": tcell.KeyCenter,
"PageUp": tcell.KeyPgUp,
"PageDown": tcell.KeyPgDn,
"Home": tcell.KeyHome,
"End": tcell.KeyEnd,
"Insert": tcell.KeyInsert,
"Delete": tcell.KeyDelete,
"Help": tcell.KeyHelp,
"Exit": tcell.KeyExit,
"Clear": tcell.KeyClear,
"Cancel": tcell.KeyCancel,
"Print": tcell.KeyPrint,
"Pause": tcell.KeyPause,
"Backtab": tcell.KeyBacktab,
"F1": tcell.KeyF1,
"F2": tcell.KeyF2 | {
modifiers := tcell.ModNone
// First, we'll strip off all the modifiers in the name and add them to the
// ModMask
modSearch:
for {
switch {
case strings.HasPrefix(k, "-") && k != "-":
// We optionally support dashes between modifiers
k = k[1:]
case strings.HasPrefix(k, "Ctrl") && k != "CtrlH":
// CtrlH technically does not have a 'Ctrl' modifier because it is really backspace
k = k[4:]
modifiers |= tcell.ModCtrl
case strings.HasPrefix(k, "Alt"):
k = k[3:]
modifiers |= tcell.ModAlt
case strings.HasPrefix(k, "Shift"):
k = k[5:] | identifier_body |
ed25519.rs |
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// If you want, you can optionally pass a "context". It is generally a
/// good idea to choose a context and try to make it unique to your project
/// and this specific usage of signatures.
///
/// For example, without this, if you were to [convert your OpenPGP key
/// to a Bitcoin key][terrible_idea] (just as an example, and also Don't
/// Ever Do That) and someone tricked you into signing an "email" which was
/// actually a Bitcoin transaction moving all your magic internet money to
/// their address, it'd be a valid transaction.
///
/// By adding a context, this trick becomes impossible, because the context
/// is concatenated into the hash, which is then signed. So, going with the
/// previous example, if your bitcoin wallet used a context of
/// "BitcoinWalletAppTxnSigning" and OpenPGP used a context (this is likely
/// the least of their safety problems) of "GPGsCryptoIsntConstantTimeLol",
/// then the signatures produced by both could never match the other, even
/// if they signed the exact same message with the same key.
///
/// Let's add a context for good measure (remember, you'll want to choose
/// your own!):
///
/// ```
/// # extern crate ed25519_dalek;
/// # extern crate rand;
/// #
/// # use ed25519_dalek::Digest;
/// # use ed25519_dalek::Keypair;
/// # use ed25519_dalek::Signature;
/// # use ed25519_dalek::Sha512;
/// # use rand::thread_rng;
/// #
/// # #[cfg(feature = "std")]
/// # fn main() {
/// # let mut csprng = thread_rng();
/// # let keypair: Keypair = Keypair::generate(&mut csprng);
/// # let message: &[u8] = b"All I want is to pet all of the dogs.";
/// # let mut prehashed: Sha512 = Sha512::new();
/// # prehashed.input(message);
/// #
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
/// [terrible_idea]: https://github.com/isislovecruft/scripts/blob/master/gpgkey2bc.py
pub fn sign_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&'static [u8]>,
) -> Signature
where
D: Digest<OutputSize = U64>,
{
let expanded: ExpandedSecretKey = (&self.secret).into(); // xxx thanks i hate this
expanded.sign_prehashed(prehashed_message, &self.public, context)
}
/// Verify a signature on a message with this keypair's public key.
pub fn verify(
&self,
message: &[u8],
signature: &Signature
) -> Result<(), SignatureError>
{
self.public.verify(message, signature)
}
/// Verify a `signature` on a `prehashed_message` using the Ed25519ph algorithm.
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
/// * `signature` is a purported Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Returns
///
/// Returns `true` if the `signature` was a valid signature created by this
/// `Keypair` on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
/// use ed25519_dalek::Sha512;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// let mut prehashed: Sha512 = Sha512::default();
/// prehashed.input(message);
///
/// let context: &[u8] = b"Ed25519DalekSignPrehashedDoctest";
///
/// let sig: Signature = keypair.sign_prehashed(prehashed, Some(context));
///
/// // The sha2::Sha512 struct doesn't implement Copy, so we'll have to create a new one:
/// let mut prehashed_again: Sha512 = Sha512::default();
/// prehashed_again.input(message);
///
/// let verified = keypair.public.verify_prehashed(prehashed_again, Some(context), &sig);
///
/// assert!(verified.is_ok());
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// [rfc8032]: https://tools.ietf.org/html/rfc8032#section-5.1
pub fn verify_prehashed<D>(
&self,
prehashed_message: D,
context: Option<&[u8]>,
signature: &Signature,
) -> Result<(), SignatureError>
where
D: Digest<OutputSize = U64>,
{
self.public.verify_prehashed(prehashed_message, context, signature)
}
}
#[cfg(feature = "serde")]
impl Serialize for Keypair {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_bytes(&self.to_bytes()[..])
}
}
#[cfg(feature = "serde")]
impl<'d> Deserialize<'d> for Keypair {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'d>,
{
struct KeypairVisitor;
impl<'d> Visitor<'d> for KeypairVisitor {
type Value = Keypair;
fn expecting(&self, formatter: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
| formatter.write_str("An ed25519 keypair, 64 bytes in total where the secret key is \
the first 32 bytes and is in unexpanded form, and the second \
32 bytes is a compressed point for a public key.")
}
| identifier_body |
|
ed25519.rs | .len(), ASSERT_MESSAGE);
assert!(signatures.len() == public_keys.len(), ASSERT_MESSAGE);
assert!(public_keys.len() == messages.len(), ASSERT_MESSAGE);
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;
use core::iter::once; | // Select a random 128-bit scalar for each signature.
let zs: Vec<Scalar> = signatures
.iter()
.map(|_| Scalar::from(thread_rng().gen::<u128>()))
.collect();
// Compute the basepoint coefficient, ∑ s[i]z[i] (mod l)
let B_coefficient: Scalar = signatures
.iter()
.map(|sig| sig.s)
.zip(zs.iter())
.map(|(s, z)| z * s)
.sum();
// Compute H(R || A || M) for each (signature, public_key, message) triplet
let hrams = (0..signatures.len()).map(|i| {
let mut h: Sha512 = Sha512::default();
h.input(signatures[i].R.as_bytes());
h.input(public_keys[i].as_bytes());
h.input(&messages[i]);
Scalar::from_hash(h)
});
// Multiply each H(R || A || M) by the random value
let zhrams = hrams.zip(zs.iter()).map(|(hram, z)| hram * z);
let Rs = signatures.iter().map(|sig| sig.R.decompress());
let As = public_keys.iter().map(|pk| Some(pk.1));
let B = once(Some(constants::ED25519_BASEPOINT_POINT));
// Compute (-∑ z[i]s[i] (mod l)) B + ∑ z[i]R[i] + ∑ (z[i]H(R||A||M)[i] (mod l)) A[i] = 0
let id = EdwardsPoint::optional_multiscalar_mul(
once(-B_coefficient).chain(zs.iter().cloned()).chain(zhrams),
B.chain(Rs).chain(As),
).ok_or_else(|| SignatureError(InternalError::VerifyError))?;
if id.is_identity() {
Ok(())
} else {
Err(SignatureError(InternalError::VerifyError))
}
}
/// An ed25519 keypair.
#[derive(Debug, Default)] // we derive Default in order to use the clear() method in Drop
pub struct Keypair {
/// The secret half of this keypair.
pub secret: SecretKey,
/// The public half of this keypair.
pub public: PublicKey,
}
impl Keypair {
/// Convert this keypair to bytes.
///
/// # Returns
///
/// An array of bytes, `[u8; KEYPAIR_LENGTH]`. The first
/// `SECRET_KEY_LENGTH` of bytes is the `SecretKey`, and the next
/// `PUBLIC_KEY_LENGTH` bytes is the `PublicKey` (the same as other
/// libraries, such as [Adam Langley's ed25519 Golang
/// implementation](https://github.com/agl/ed25519/)).
pub fn to_bytes(&self) -> [u8; KEYPAIR_LENGTH] {
let mut bytes: [u8; KEYPAIR_LENGTH] = [0u8; KEYPAIR_LENGTH];
bytes[..SECRET_KEY_LENGTH].copy_from_slice(self.secret.as_bytes());
bytes[SECRET_KEY_LENGTH..].copy_from_slice(self.public.as_bytes());
bytes
}
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `bytes`: an `&[u8]` representing the scalar for the secret key, and a
/// compressed Edwards-Y coordinate of a point on curve25519, both as bytes.
/// (As obtained from `Keypair::to_bytes()`.)
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Result` whose okay value is an EdDSA `Keypair` or whose error value
/// is an `SignatureError` describing the error that occurred.
pub fn from_bytes<'a>(bytes: &'a [u8]) -> Result<Keypair, SignatureError> {
if bytes.len() != KEYPAIR_LENGTH {
return Err(SignatureError(InternalError::BytesLengthError {
name: "Keypair",
length: KEYPAIR_LENGTH,
}));
}
let secret = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH])?;
let public = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..])?;
Ok(Keypair{ secret: secret, public: public })
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate ed25519_dalek;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
///
/// use rand::Rng;
/// use rand::rngs::OsRng;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
///
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. `rand_chacha::ChaChaRng`.
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
pub fn generate<R>(csprng: &mut R) -> Keypair
where
R: CryptoRng + Rng,
{
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = (&sk).into();
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign(&self, message: &[u8]) -> Signature {
let expanded: ExpandedSecretKey = (&self.secret).into();
expanded.sign(&message, &self.public)
}
/// Sign a `prehashed_message` with this `Keypair` using the
/// Ed25519ph algorithm defined in [RFC8032 §5.1][rfc8032].
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main | use rand::thread_rng;
use curve25519_dalek::traits::IsIdentity;
use curve25519_dalek::traits::VartimeMultiscalarMul;
| random_line_split |
ed25519.rs | (), ASSERT_MESSAGE);
assert!(signatures.len() == public_keys.len(), ASSERT_MESSAGE);
assert!(public_keys.len() == messages.len(), ASSERT_MESSAGE);
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;
use core::iter::once;
use rand::thread_rng;
use curve25519_dalek::traits::IsIdentity;
use curve25519_dalek::traits::VartimeMultiscalarMul;
// Select a random 128-bit scalar for each signature.
let zs: Vec<Scalar> = signatures
.iter()
.map(|_| Scalar::from(thread_rng().gen::<u128>()))
.collect();
// Compute the basepoint coefficient, ∑ s[i]z[i] (mod l)
let B_coefficient: Scalar = signatures
.iter()
.map(|sig| sig.s)
.zip(zs.iter())
.map(|(s, z)| z * s)
.sum();
// Compute H(R || A || M) for each (signature, public_key, message) triplet
let hrams = (0..signatures.len()).map(|i| {
let mut h: Sha512 = Sha512::default();
h.input(signatures[i].R.as_bytes());
h.input(public_keys[i].as_bytes());
h.input(&messages[i]);
Scalar::from_hash(h)
});
// Multiply each H(R || A || M) by the random value
let zhrams = hrams.zip(zs.iter()).map(|(hram, z)| hram * z);
let Rs = signatures.iter().map(|sig| sig.R.decompress());
let As = public_keys.iter().map(|pk| Some(pk.1));
let B = once(Some(constants::ED25519_BASEPOINT_POINT));
// Compute (-∑ z[i]s[i] (mod l)) B + ∑ z[i]R[i] + ∑ (z[i]H(R||A||M)[i] (mod l)) A[i] = 0
let id = EdwardsPoint::optional_multiscalar_mul(
once(-B_coefficient).chain(zs.iter().cloned()).chain(zhrams),
B.chain(Rs).chain(As),
).ok_or_else(|| SignatureError(InternalError::VerifyError))?;
if id.is_identity() {
Ok(())
} else {
Err(SignatureError(InternalError::VerifyError))
}
}
/// An ed25519 keypair.
#[derive(Debug, Default)] // we derive Default in order to use the clear() method in Drop
pub struct Keypair {
/// The secret half of this keypair.
pub secret: SecretKey,
/// The public half of this keypair.
pub public: PublicKey,
}
impl Keypair {
/// Convert this keypair to bytes.
///
/// # Returns
///
/// An array of bytes, `[u8; KEYPAIR_LENGTH]`. The first
/// `SECRET_KEY_LENGTH` of bytes is the `SecretKey`, and the next
/// `PUBLIC_KEY_LENGTH` bytes is the `PublicKey` (the same as other
/// libraries, such as [Adam Langley's ed25519 Golang
/// implementation](https://github.com/agl/ed25519/)).
pub fn to_bytes(&self) -> [u8; KEYPAIR_LENGTH] {
let mut bytes: [u8; KEYPAIR_LENGTH] = [0u8; KEYPAIR_LENGTH];
bytes[..SECRET_KEY_LENGTH].copy_from_slice(self.secret.as_bytes());
bytes[SECRET_KEY_LENGTH..].copy_from_slice(self.public.as_bytes());
bytes
}
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `bytes`: an `&[u8]` representing the scalar for the secret key, and a
/// compressed Edwards-Y coordinate of a point on curve25519, both as bytes.
/// (As obtained from `Keypair::to_bytes()`.)
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Result` whose okay value is an EdDSA `Keypair` or whose error value
/// is an `SignatureError` describing the error that occurred.
pub fn from_byt | es: &'a [u8]) -> Result<Keypair, SignatureError> {
if bytes.len() != KEYPAIR_LENGTH {
return Err(SignatureError(InternalError::BytesLengthError {
name: "Keypair",
length: KEYPAIR_LENGTH,
}));
}
let secret = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH])?;
let public = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..])?;
Ok(Keypair{ secret: secret, public: public })
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate ed25519_dalek;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
///
/// use rand::Rng;
/// use rand::rngs::OsRng;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
///
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. `rand_chacha::ChaChaRng`.
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
pub fn generate<R>(csprng: &mut R) -> Keypair
where
R: CryptoRng + Rng,
{
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = (&sk).into();
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign(&self, message: &[u8]) -> Signature {
let expanded: ExpandedSecretKey = (&self.secret).into();
expanded.sign(&message, &self.public)
}
/// Sign a `prehashed_message` with this `Keypair` using the
/// Ed25519ph algorithm defined in [RFC8032 §5.1][rfc8032].
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// | es<'a>(byt | identifier_name |
ed25519.rs | .len(), ASSERT_MESSAGE);
assert!(signatures.len() == public_keys.len(), ASSERT_MESSAGE);
assert!(public_keys.len() == messages.len(), ASSERT_MESSAGE);
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;
use core::iter::once;
use rand::thread_rng;
use curve25519_dalek::traits::IsIdentity;
use curve25519_dalek::traits::VartimeMultiscalarMul;
// Select a random 128-bit scalar for each signature.
let zs: Vec<Scalar> = signatures
.iter()
.map(|_| Scalar::from(thread_rng().gen::<u128>()))
.collect();
// Compute the basepoint coefficient, ∑ s[i]z[i] (mod l)
let B_coefficient: Scalar = signatures
.iter()
.map(|sig| sig.s)
.zip(zs.iter())
.map(|(s, z)| z * s)
.sum();
// Compute H(R || A || M) for each (signature, public_key, message) triplet
let hrams = (0..signatures.len()).map(|i| {
let mut h: Sha512 = Sha512::default();
h.input(signatures[i].R.as_bytes());
h.input(public_keys[i].as_bytes());
h.input(&messages[i]);
Scalar::from_hash(h)
});
// Multiply each H(R || A || M) by the random value
let zhrams = hrams.zip(zs.iter()).map(|(hram, z)| hram * z);
let Rs = signatures.iter().map(|sig| sig.R.decompress());
let As = public_keys.iter().map(|pk| Some(pk.1));
let B = once(Some(constants::ED25519_BASEPOINT_POINT));
// Compute (-∑ z[i]s[i] (mod l)) B + ∑ z[i]R[i] + ∑ (z[i]H(R||A||M)[i] (mod l)) A[i] = 0
let id = EdwardsPoint::optional_multiscalar_mul(
once(-B_coefficient).chain(zs.iter().cloned()).chain(zhrams),
B.chain(Rs).chain(As),
).ok_or_else(|| SignatureError(InternalError::VerifyError))?;
if id.is_identity() {
| Err(SignatureError(InternalError::VerifyError))
}
}
/// An ed25519 keypair.
#[derive(Debug, Default)] // we derive Default in order to use the clear() method in Drop
pub struct Keypair {
/// The secret half of this keypair.
pub secret: SecretKey,
/// The public half of this keypair.
pub public: PublicKey,
}
impl Keypair {
/// Convert this keypair to bytes.
///
/// # Returns
///
/// An array of bytes, `[u8; KEYPAIR_LENGTH]`. The first
/// `SECRET_KEY_LENGTH` of bytes is the `SecretKey`, and the next
/// `PUBLIC_KEY_LENGTH` bytes is the `PublicKey` (the same as other
/// libraries, such as [Adam Langley's ed25519 Golang
/// implementation](https://github.com/agl/ed25519/)).
pub fn to_bytes(&self) -> [u8; KEYPAIR_LENGTH] {
let mut bytes: [u8; KEYPAIR_LENGTH] = [0u8; KEYPAIR_LENGTH];
bytes[..SECRET_KEY_LENGTH].copy_from_slice(self.secret.as_bytes());
bytes[SECRET_KEY_LENGTH..].copy_from_slice(self.public.as_bytes());
bytes
}
/// Construct a `Keypair` from the bytes of a `PublicKey` and `SecretKey`.
///
/// # Inputs
///
/// * `bytes`: an `&[u8]` representing the scalar for the secret key, and a
/// compressed Edwards-Y coordinate of a point on curve25519, both as bytes.
/// (As obtained from `Keypair::to_bytes()`.)
///
/// # Warning
///
/// Absolutely no validation is done on the key. If you give this function
/// bytes which do not represent a valid point, or which do not represent
/// corresponding parts of the key, then your `Keypair` will be broken and
/// it will be your fault.
///
/// # Returns
///
/// A `Result` whose okay value is an EdDSA `Keypair` or whose error value
/// is an `SignatureError` describing the error that occurred.
pub fn from_bytes<'a>(bytes: &'a [u8]) -> Result<Keypair, SignatureError> {
if bytes.len() != KEYPAIR_LENGTH {
return Err(SignatureError(InternalError::BytesLengthError {
name: "Keypair",
length: KEYPAIR_LENGTH,
}));
}
let secret = SecretKey::from_bytes(&bytes[..SECRET_KEY_LENGTH])?;
let public = PublicKey::from_bytes(&bytes[SECRET_KEY_LENGTH..])?;
Ok(Keypair{ secret: secret, public: public })
}
/// Generate an ed25519 keypair.
///
/// # Example
///
/// ```
/// extern crate rand;
/// extern crate ed25519_dalek;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
///
/// use rand::Rng;
/// use rand::rngs::OsRng;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Signature;
///
/// let keypair: Keypair = Keypair::generate(&mut OsRng);
///
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn main() { }
/// ```
///
/// # Input
///
/// A CSPRNG with a `fill_bytes()` method, e.g. `rand_chacha::ChaChaRng`.
///
/// The caller must also supply a hash function which implements the
/// `Digest` and `Default` traits, and which returns 512 bits of output.
/// The standard hash function used for most ed25519 libraries is SHA-512,
/// which is available with `use sha2::Sha512` as in the example above.
/// Other suitable hash functions include Keccak-512 and Blake2b-512.
pub fn generate<R>(csprng: &mut R) -> Keypair
where
R: CryptoRng + Rng,
{
let sk: SecretKey = SecretKey::generate(csprng);
let pk: PublicKey = (&sk).into();
Keypair{ public: pk, secret: sk }
}
/// Sign a message with this keypair's secret key.
pub fn sign(&self, message: &[u8]) -> Signature {
let expanded: ExpandedSecretKey = (&self.secret).into();
expanded.sign(&message, &self.public)
}
/// Sign a `prehashed_message` with this `Keypair` using the
/// Ed25519ph algorithm defined in [RFC8032 §5.1][rfc8032].
///
/// # Inputs
///
/// * `prehashed_message` is an instantiated hash digest with 512-bits of
/// output which has had the message to be signed previously fed into its
/// state.
/// * `context` is an optional context string, up to 255 bytes inclusive,
/// which may be used to provide additional domain separation. If not
/// set, this will default to an empty string.
///
/// # Returns
///
/// An Ed25519ph [`Signature`] on the `prehashed_message`.
///
/// # Examples
///
/// ```
/// extern crate ed25519_dalek;
/// extern crate rand;
///
/// use ed25519_dalek::Digest;
/// use ed25519_dalek::Keypair;
/// use ed25519_dalek::Sha512;
/// use ed25519_dalek::Signature;
/// use rand::thread_rng;
///
/// # #[cfg(feature = "std")]
/// # fn main() {
/// let mut csprng = thread_rng();
/// let keypair: Keypair = Keypair::generate(&mut csprng);
/// let message: &[u8] = b"All I want is to pet all of the dogs.";
///
/// // Create a hash digest object which we'll feed the message into:
/// let mut prehashed: Sha512 = Sha512::new();
///
/// prehashed.input(message);
/// # }
/// #
/// # #[cfg(not(feature = "std"))]
/// # fn | Ok(())
} else {
| conditional_block |
describe.go | data types which can be directly used
// for printing out
type revisionDesc struct {
revision *servingv1.Revision
// traffic stuff
percent int64
tag string
latestTraffic *bool
configurationGeneration int
// status info
latestCreated bool
latestReady bool
}
// [REMOVE COMMENT WHEN MOVING TO 0.7.0]
// For transition to v1beta1 this command uses the migration approach as described
// in https://docs.google.com/presentation/d/1mOhnhy8kA4-K9Necct-NeIwysxze_FUule-8u5ZHmwA/edit#slide=id.p
// With serving 0.6.0 we are at step #1
// I.e we first look at new fields of the v1alpha1 API before falling back to the original ones.
// As this command does not do any writes/updates, it's just a matter of fallbacks.
// [/REMOVE COMMENT WHEN MOVING TO 0.7.0]
var describe_example = `
# Describe service 'svc' in human friendly format
kn service describe svc
# Describe service 'svc' in YAML format
kn service describe svc -o yaml
# Print only service URL
kn service describe svc -o url
# Describe the services in offline mode instead of kubernetes cluster (Beta)
kn service describe test -n test-ns --target=/user/knfiles
kn service describe test --target=/user/knfiles/test.yaml
kn service describe test --target=/user/knfiles/test.json`
// NewServiceDescribeCommand returns a new command for describing a service.
func NewServiceDescribeCommand(p *commands.KnParams) *cobra.Command {
// For machine readable output
machineReadablePrintFlags := genericclioptions.NewPrintFlags("")
command := &cobra.Command{
Use: "describe NAME",
Short: "Show details of a service",
Example: describe_example,
ValidArgsFunction: commands.ResourceNameCompletionFunc(p),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("'service describe' requires the service name given as single argument")
}
serviceName := args[0]
namespace, err := p.GetNamespace(cmd) | client, err := newServingClient(p, namespace, cmd.Flag("target").Value.String())
if err != nil {
return err
}
service, err := client.GetService(cmd.Context(), serviceName)
if err != nil {
return err
}
// Print out machine readable output if requested
if machineReadablePrintFlags.OutputFlagSpecified() {
out := cmd.OutOrStdout()
if strings.ToLower(*machineReadablePrintFlags.OutputFormat) == "url" {
fmt.Fprintf(out, "%s\n", extractURL(service))
return nil
}
printer, err := machineReadablePrintFlags.ToPrinter()
if err != nil {
return err
}
return printer.PrintObj(service, out)
}
printDetails, err = cmd.Flags().GetBool("verbose")
if err != nil {
return err
}
revisionDescs, err := getRevisionDescriptions(cmd.Context(), client, service, printDetails)
if err != nil {
return err
}
return describe(cmd.OutOrStdout(), service, revisionDescs, printDetails)
},
}
flags := command.Flags()
commands.AddNamespaceFlags(flags, false)
commands.AddGitOpsFlags(flags)
flags.BoolP("verbose", "v", false, "More output.")
machineReadablePrintFlags.AddFlags(command)
command.Flag("output").Usage = fmt.Sprintf("Output format. One of: %s.", strings.Join(append(machineReadablePrintFlags.AllowedFormats(), "url"), "|"))
return command
}
// Main action describing the service
func describe(w io.Writer, service *servingv1.Service, revisions []*revisionDesc, printDetails bool) error {
dw := printers.NewPrefixWriter(w)
// Service info
writeService(dw, service)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Revisions summary info
writeRevisions(dw, revisions, printDetails)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Condition info
commands.WriteConditions(dw, service.Status.Conditions, printDetails)
if err := dw.Flush(); err != nil {
return err
}
return nil
}
// Write out main service information. Use colors for major items.
func writeService(dw printers.PrefixWriter, service *servingv1.Service) {
commands.WriteMetadata(dw, &service.ObjectMeta, printDetails)
dw.WriteAttribute("URL", extractURL(service))
if printDetails {
if service.Status.Address != nil {
url := service.Status.Address.URL
dw.WriteAttribute("Cluster", url.String())
}
}
if service.Spec.Template.Spec.ServiceAccountName != "" {
dw.WriteAttribute("Service Account", service.Spec.Template.Spec.ServiceAccountName)
}
if service.Spec.Template.Spec.ImagePullSecrets != nil {
dw.WriteAttribute("Image Pull Secret", service.Spec.Template.Spec.ImagePullSecrets[0].Name)
}
}
// Write out revisions associated with this service. By default only active
// target revisions are printed, but with --verbose also inactive revisions
// created by this services are shown
func writeRevisions(dw printers.PrefixWriter, revisions []*revisionDesc, printDetails bool) {
revSection := dw.WriteAttribute("Revisions", "")
dw.Flush()
for _, revisionDesc := range revisions {
ready := apis.Condition{
Type: apis.ConditionReady,
Status: corev1.ConditionUnknown,
}
for _, cond := range revisionDesc.revision.Status.Conditions {
if cond.Type == apis.ConditionReady {
ready = cond
break
}
}
section := revSection.WriteColsLn(formatBullet(revisionDesc.percent, ready.Status), revisionHeader(revisionDesc))
if ready.Status == corev1.ConditionFalse {
section.WriteAttribute("Error", ready.Reason)
}
revision.WriteImage(section, revisionDesc.revision)
revision.WriteReplicas(section, revisionDesc.revision)
if printDetails {
revision.WritePort(section, revisionDesc.revision)
revision.WriteEnv(section, revisionDesc.revision, printDetails)
revision.WriteEnvFrom(section, revisionDesc.revision, printDetails)
revision.WriteScale(section, revisionDesc.revision)
revision.WriteConcurrencyOptions(section, revisionDesc.revision)
revision.WriteResources(section, revisionDesc.revision)
}
}
}
// ======================================================================================
// Helper functions
// Format the revision name along with its generation. Use colors if enabled.
func revisionHeader(desc *revisionDesc) string {
header := desc.revision.Name
if desc.latestTraffic != nil && *desc.latestTraffic {
header = fmt.Sprintf("@latest (%s)", desc.revision.Name)
} else if desc.latestReady {
header = desc.revision.Name + " (current @latest)"
} else if desc.latestCreated {
header = desc.revision.Name + " (latest created)"
}
if desc.tag != "" {
header = fmt.Sprintf("%s #%s", header, desc.tag)
}
return header + " " +
"[" + strconv.Itoa(desc.configurationGeneration) + "]" +
" " +
"(" + commands.Age(desc.revision.CreationTimestamp.Time) + ")"
}
// Format target percentage that it fits in the revision table
func formatBullet(percentage int64, status corev1.ConditionStatus) string {
symbol := "+"
switch status {
case corev1.ConditionTrue:
if percentage > 0 {
symbol = "%"
}
case corev1.ConditionFalse:
symbol = "!"
default:
symbol = "?"
}
if percentage == 0 {
return fmt.Sprintf(" %s", symbol)
}
return fmt.Sprintf("%3d%s", percentage, symbol)
}
// Call the backend to query revisions for the given service and build up
// the view objects used for output
func getRevisionDescriptions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, withDetails bool) ([]*revisionDesc, error) {
revisionsSeen := sets.NewString()
revisionDescs := []*revisionDesc{}
trafficTargets := service.Status.Traffic
var err error
for i := range trafficTargets {
target := trafficTargets[i]
revision, err := extractRevisionFromTarget(ctx, client, target)
if err != nil {
return nil, fmt.Errorf("cannot extract revision from service %s: %w", service.Name, err)
}
revisionsSeen.Insert(revision.Name)
desc, err := newRevisionDesc(*revision, &target, service)
if err != nil {
return nil, err
}
revisionDescs = append(revisionDescs, desc)
}
if revisionDescs, err = completeWithLatestRevisions(ctx, client, service, revisionsSeen, revisionDescs); err != nil {
return nil, err
}
if withDetails {
if revisionDescs, err = completeWithUntargetedRevisions(ctx, client, service, revisions | if err != nil {
return err
}
| random_line_split |
describe.go | data types which can be directly used
// for printing out
type revisionDesc struct {
revision *servingv1.Revision
// traffic stuff
percent int64
tag string
latestTraffic *bool
configurationGeneration int
// status info
latestCreated bool
latestReady bool
}
// [REMOVE COMMENT WHEN MOVING TO 0.7.0]
// For transition to v1beta1 this command uses the migration approach as described
// in https://docs.google.com/presentation/d/1mOhnhy8kA4-K9Necct-NeIwysxze_FUule-8u5ZHmwA/edit#slide=id.p
// With serving 0.6.0 we are at step #1
// I.e we first look at new fields of the v1alpha1 API before falling back to the original ones.
// As this command does not do any writes/updates, it's just a matter of fallbacks.
// [/REMOVE COMMENT WHEN MOVING TO 0.7.0]
var describe_example = `
# Describe service 'svc' in human friendly format
kn service describe svc
# Describe service 'svc' in YAML format
kn service describe svc -o yaml
# Print only service URL
kn service describe svc -o url
# Describe the services in offline mode instead of kubernetes cluster (Beta)
kn service describe test -n test-ns --target=/user/knfiles
kn service describe test --target=/user/knfiles/test.yaml
kn service describe test --target=/user/knfiles/test.json`
// NewServiceDescribeCommand returns a new command for describing a service.
func NewServiceDescribeCommand(p *commands.KnParams) *cobra.Command {
// For machine readable output
machineReadablePrintFlags := genericclioptions.NewPrintFlags("")
command := &cobra.Command{
Use: "describe NAME",
Short: "Show details of a service",
Example: describe_example,
ValidArgsFunction: commands.ResourceNameCompletionFunc(p),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("'service describe' requires the service name given as single argument")
}
serviceName := args[0]
namespace, err := p.GetNamespace(cmd)
if err != nil {
return err
}
client, err := newServingClient(p, namespace, cmd.Flag("target").Value.String())
if err != nil {
return err
}
service, err := client.GetService(cmd.Context(), serviceName)
if err != nil {
return err
}
// Print out machine readable output if requested
if machineReadablePrintFlags.OutputFlagSpecified() {
out := cmd.OutOrStdout()
if strings.ToLower(*machineReadablePrintFlags.OutputFormat) == "url" {
fmt.Fprintf(out, "%s\n", extractURL(service))
return nil
}
printer, err := machineReadablePrintFlags.ToPrinter()
if err != nil {
return err
}
return printer.PrintObj(service, out)
}
printDetails, err = cmd.Flags().GetBool("verbose")
if err != nil {
return err
}
revisionDescs, err := getRevisionDescriptions(cmd.Context(), client, service, printDetails)
if err != nil {
return err
}
return describe(cmd.OutOrStdout(), service, revisionDescs, printDetails)
},
}
flags := command.Flags()
commands.AddNamespaceFlags(flags, false)
commands.AddGitOpsFlags(flags)
flags.BoolP("verbose", "v", false, "More output.")
machineReadablePrintFlags.AddFlags(command)
command.Flag("output").Usage = fmt.Sprintf("Output format. One of: %s.", strings.Join(append(machineReadablePrintFlags.AllowedFormats(), "url"), "|"))
return command
}
// Main action describing the service
func describe(w io.Writer, service *servingv1.Service, revisions []*revisionDesc, printDetails bool) error {
dw := printers.NewPrefixWriter(w)
// Service info
writeService(dw, service)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Revisions summary info
writeRevisions(dw, revisions, printDetails)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Condition info
commands.WriteConditions(dw, service.Status.Conditions, printDetails)
if err := dw.Flush(); err != nil {
return err
}
return nil
}
// Write out main service information. Use colors for major items.
func writeService(dw printers.PrefixWriter, service *servingv1.Service) {
commands.WriteMetadata(dw, &service.ObjectMeta, printDetails)
dw.WriteAttribute("URL", extractURL(service))
if printDetails {
if service.Status.Address != nil {
url := service.Status.Address.URL
dw.WriteAttribute("Cluster", url.String())
}
}
if service.Spec.Template.Spec.ServiceAccountName != "" {
dw.WriteAttribute("Service Account", service.Spec.Template.Spec.ServiceAccountName)
}
if service.Spec.Template.Spec.ImagePullSecrets != nil {
dw.WriteAttribute("Image Pull Secret", service.Spec.Template.Spec.ImagePullSecrets[0].Name)
}
}
// Write out revisions associated with this service. By default only active
// target revisions are printed, but with --verbose also inactive revisions
// created by this services are shown
func writeRevisions(dw printers.PrefixWriter, revisions []*revisionDesc, printDetails bool) {
revSection := dw.WriteAttribute("Revisions", "")
dw.Flush()
for _, revisionDesc := range revisions {
ready := apis.Condition{
Type: apis.ConditionReady,
Status: corev1.ConditionUnknown,
}
for _, cond := range revisionDesc.revision.Status.Conditions { | section := revSection.WriteColsLn(formatBullet(revisionDesc.percent, ready.Status), revisionHeader(revisionDesc))
if ready.Status == corev1.ConditionFalse {
section.WriteAttribute("Error", ready.Reason)
}
revision.WriteImage(section, revisionDesc.revision)
revision.WriteReplicas(section, revisionDesc.revision)
if printDetails {
revision.WritePort(section, revisionDesc.revision)
revision.WriteEnv(section, revisionDesc.revision, printDetails)
revision.WriteEnvFrom(section, revisionDesc.revision, printDetails)
revision.WriteScale(section, revisionDesc.revision)
revision.WriteConcurrencyOptions(section, revisionDesc.revision)
revision.WriteResources(section, revisionDesc.revision)
}
}
}
// ======================================================================================
// Helper functions
// Format the revision name along with its generation. Use colors if enabled.
func revisionHeader(desc *revisionDesc) string {
header := desc.revision.Name
if desc.latestTraffic != nil && *desc.latestTraffic {
header = fmt.Sprintf("@latest (%s)", desc.revision.Name)
} else if desc.latestReady {
header = desc.revision.Name + " (current @latest)"
} else if desc.latestCreated {
header = desc.revision.Name + " (latest created)"
}
if desc.tag != "" {
header = fmt.Sprintf("%s #%s", header, desc.tag)
}
return header + " " +
"[" + strconv.Itoa(desc.configurationGeneration) + "]" +
" " +
"(" + commands.Age(desc.revision.CreationTimestamp.Time) + ")"
}
// Format target percentage that it fits in the revision table
func formatBullet(percentage int64, status corev1.ConditionStatus) string {
symbol := "+"
switch status {
case corev1.ConditionTrue:
if percentage > 0 {
symbol = "%"
}
case corev1.ConditionFalse:
symbol = "!"
default:
symbol = "?"
}
if percentage == 0 {
return fmt.Sprintf(" %s", symbol)
}
return fmt.Sprintf("%3d%s", percentage, symbol)
}
// Call the backend to query revisions for the given service and build up
// the view objects used for output
func getRevisionDescriptions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, withDetails bool) ([]*revisionDesc, error) {
revisionsSeen := sets.NewString()
revisionDescs := []*revisionDesc{}
trafficTargets := service.Status.Traffic
var err error
for i := range trafficTargets {
target := trafficTargets[i]
revision, err := extractRevisionFromTarget(ctx, client, target)
if err != nil {
return nil, fmt.Errorf("cannot extract revision from service %s: %w", service.Name, err)
}
revisionsSeen.Insert(revision.Name)
desc, err := newRevisionDesc(*revision, &target, service)
if err != nil {
return nil, err
}
revisionDescs = append(revisionDescs, desc)
}
if revisionDescs, err = completeWithLatestRevisions(ctx, client, service, revisionsSeen, revisionDescs); err != nil {
return nil, err
}
if withDetails {
if revisionDescs, err = completeWithUntargetedRevisions(ctx, client, service, |
if cond.Type == apis.ConditionReady {
ready = cond
break
}
}
| conditional_block |
describe.go | (out, "%s\n", extractURL(service))
return nil
}
printer, err := machineReadablePrintFlags.ToPrinter()
if err != nil {
return err
}
return printer.PrintObj(service, out)
}
printDetails, err = cmd.Flags().GetBool("verbose")
if err != nil {
return err
}
revisionDescs, err := getRevisionDescriptions(cmd.Context(), client, service, printDetails)
if err != nil {
return err
}
return describe(cmd.OutOrStdout(), service, revisionDescs, printDetails)
},
}
flags := command.Flags()
commands.AddNamespaceFlags(flags, false)
commands.AddGitOpsFlags(flags)
flags.BoolP("verbose", "v", false, "More output.")
machineReadablePrintFlags.AddFlags(command)
command.Flag("output").Usage = fmt.Sprintf("Output format. One of: %s.", strings.Join(append(machineReadablePrintFlags.AllowedFormats(), "url"), "|"))
return command
}
// Main action describing the service
func describe(w io.Writer, service *servingv1.Service, revisions []*revisionDesc, printDetails bool) error {
dw := printers.NewPrefixWriter(w)
// Service info
writeService(dw, service)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Revisions summary info
writeRevisions(dw, revisions, printDetails)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Condition info
commands.WriteConditions(dw, service.Status.Conditions, printDetails)
if err := dw.Flush(); err != nil {
return err
}
return nil
}
// Write out main service information. Use colors for major items.
func writeService(dw printers.PrefixWriter, service *servingv1.Service) {
commands.WriteMetadata(dw, &service.ObjectMeta, printDetails)
dw.WriteAttribute("URL", extractURL(service))
if printDetails {
if service.Status.Address != nil {
url := service.Status.Address.URL
dw.WriteAttribute("Cluster", url.String())
}
}
if service.Spec.Template.Spec.ServiceAccountName != "" {
dw.WriteAttribute("Service Account", service.Spec.Template.Spec.ServiceAccountName)
}
if service.Spec.Template.Spec.ImagePullSecrets != nil {
dw.WriteAttribute("Image Pull Secret", service.Spec.Template.Spec.ImagePullSecrets[0].Name)
}
}
// Write out revisions associated with this service. By default only active
// target revisions are printed, but with --verbose also inactive revisions
// created by this services are shown
func writeRevisions(dw printers.PrefixWriter, revisions []*revisionDesc, printDetails bool) {
revSection := dw.WriteAttribute("Revisions", "")
dw.Flush()
for _, revisionDesc := range revisions {
ready := apis.Condition{
Type: apis.ConditionReady,
Status: corev1.ConditionUnknown,
}
for _, cond := range revisionDesc.revision.Status.Conditions {
if cond.Type == apis.ConditionReady {
ready = cond
break
}
}
section := revSection.WriteColsLn(formatBullet(revisionDesc.percent, ready.Status), revisionHeader(revisionDesc))
if ready.Status == corev1.ConditionFalse {
section.WriteAttribute("Error", ready.Reason)
}
revision.WriteImage(section, revisionDesc.revision)
revision.WriteReplicas(section, revisionDesc.revision)
if printDetails {
revision.WritePort(section, revisionDesc.revision)
revision.WriteEnv(section, revisionDesc.revision, printDetails)
revision.WriteEnvFrom(section, revisionDesc.revision, printDetails)
revision.WriteScale(section, revisionDesc.revision)
revision.WriteConcurrencyOptions(section, revisionDesc.revision)
revision.WriteResources(section, revisionDesc.revision)
}
}
}
// ======================================================================================
// Helper functions
// Format the revision name along with its generation. Use colors if enabled.
func revisionHeader(desc *revisionDesc) string {
header := desc.revision.Name
if desc.latestTraffic != nil && *desc.latestTraffic {
header = fmt.Sprintf("@latest (%s)", desc.revision.Name)
} else if desc.latestReady {
header = desc.revision.Name + " (current @latest)"
} else if desc.latestCreated {
header = desc.revision.Name + " (latest created)"
}
if desc.tag != "" {
header = fmt.Sprintf("%s #%s", header, desc.tag)
}
return header + " " +
"[" + strconv.Itoa(desc.configurationGeneration) + "]" +
" " +
"(" + commands.Age(desc.revision.CreationTimestamp.Time) + ")"
}
// Format target percentage that it fits in the revision table
func formatBullet(percentage int64, status corev1.ConditionStatus) string {
symbol := "+"
switch status {
case corev1.ConditionTrue:
if percentage > 0 {
symbol = "%"
}
case corev1.ConditionFalse:
symbol = "!"
default:
symbol = "?"
}
if percentage == 0 {
return fmt.Sprintf(" %s", symbol)
}
return fmt.Sprintf("%3d%s", percentage, symbol)
}
// Call the backend to query revisions for the given service and build up
// the view objects used for output
func getRevisionDescriptions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, withDetails bool) ([]*revisionDesc, error) {
revisionsSeen := sets.NewString()
revisionDescs := []*revisionDesc{}
trafficTargets := service.Status.Traffic
var err error
for i := range trafficTargets {
target := trafficTargets[i]
revision, err := extractRevisionFromTarget(ctx, client, target)
if err != nil {
return nil, fmt.Errorf("cannot extract revision from service %s: %w", service.Name, err)
}
revisionsSeen.Insert(revision.Name)
desc, err := newRevisionDesc(*revision, &target, service)
if err != nil {
return nil, err
}
revisionDescs = append(revisionDescs, desc)
}
if revisionDescs, err = completeWithLatestRevisions(ctx, client, service, revisionsSeen, revisionDescs); err != nil {
return nil, err
}
if withDetails {
if revisionDescs, err = completeWithUntargetedRevisions(ctx, client, service, revisionsSeen, revisionDescs); err != nil {
return nil, err
}
}
return orderByConfigurationGeneration(revisionDescs), nil
}
// Order the list of revisions so that the newest revisions are at the top
func orderByConfigurationGeneration(descs []*revisionDesc) []*revisionDesc {
sort.SliceStable(descs, func(i, j int) bool {
return descs[i].configurationGeneration > descs[j].configurationGeneration
})
return descs
}
func completeWithLatestRevisions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, revisionsSeen sets.String, descs []*revisionDesc) ([]*revisionDesc, error) {
for _, revisionName := range []string{service.Status.LatestCreatedRevisionName, service.Status.LatestReadyRevisionName} {
if revisionName == "" || revisionsSeen.Has(revisionName) {
continue
}
revisionsSeen.Insert(revisionName)
rev, err := client.GetRevision(ctx, revisionName)
if err != nil {
return nil, err
}
newDesc, err := newRevisionDesc(*rev, nil, service)
if err != nil {
return nil, err
}
descs = append(descs, newDesc)
}
return descs, nil
}
func completeWithUntargetedRevisions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, revisionsSeen sets.String, descs []*revisionDesc) ([]*revisionDesc, error) {
revisions, err := client.ListRevisions(ctx, clientservingv1.WithService(service.Name))
if err != nil {
return nil, err
}
for _, revision := range revisions.Items {
if revisionsSeen.Has(revision.Name) {
continue
}
revisionsSeen.Insert(revision.Name)
newDesc, err := newRevisionDesc(revision, nil, service)
if err != nil {
return nil, err
}
descs = append(descs, newDesc)
}
return descs, nil
}
func newRevisionDesc(revision servingv1.Revision, target *servingv1.TrafficTarget, service *servingv1.Service) (*revisionDesc, error) { |
generation, err := strconv.ParseInt(revision.Labels[serving.ConfigurationGenerationLabelKey], 0, 0)
if err != nil {
return nil, fmt.Errorf("cannot extract configuration generation for revision %s: %w", revision.Name, err)
}
revisionDesc := revisionDesc{
revision: &revision,
configurationGeneration: int(generation),
latestCreated: revision.Name == service.Status.LatestCreatedRevisionName,
latestReady: revision.Name == service.Status.LatestReadyRevisionName,
}
addTargetInfo(&revisionDesc, target)
if err != nil {
return nil, err
}
return &revisionDesc, nil
}
| identifier_body |
|
describe.go | .AllowedFormats(), "url"), "|"))
return command
}
// Main action describing the service
func describe(w io.Writer, service *servingv1.Service, revisions []*revisionDesc, printDetails bool) error {
dw := printers.NewPrefixWriter(w)
// Service info
writeService(dw, service)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Revisions summary info
writeRevisions(dw, revisions, printDetails)
dw.WriteLine()
if err := dw.Flush(); err != nil {
return err
}
// Condition info
commands.WriteConditions(dw, service.Status.Conditions, printDetails)
if err := dw.Flush(); err != nil {
return err
}
return nil
}
// Write out main service information. Use colors for major items.
func writeService(dw printers.PrefixWriter, service *servingv1.Service) {
commands.WriteMetadata(dw, &service.ObjectMeta, printDetails)
dw.WriteAttribute("URL", extractURL(service))
if printDetails {
if service.Status.Address != nil {
url := service.Status.Address.URL
dw.WriteAttribute("Cluster", url.String())
}
}
if service.Spec.Template.Spec.ServiceAccountName != "" {
dw.WriteAttribute("Service Account", service.Spec.Template.Spec.ServiceAccountName)
}
if service.Spec.Template.Spec.ImagePullSecrets != nil {
dw.WriteAttribute("Image Pull Secret", service.Spec.Template.Spec.ImagePullSecrets[0].Name)
}
}
// Write out revisions associated with this service. By default only active
// target revisions are printed, but with --verbose also inactive revisions
// created by this services are shown
func writeRevisions(dw printers.PrefixWriter, revisions []*revisionDesc, printDetails bool) {
revSection := dw.WriteAttribute("Revisions", "")
dw.Flush()
for _, revisionDesc := range revisions {
ready := apis.Condition{
Type: apis.ConditionReady,
Status: corev1.ConditionUnknown,
}
for _, cond := range revisionDesc.revision.Status.Conditions {
if cond.Type == apis.ConditionReady {
ready = cond
break
}
}
section := revSection.WriteColsLn(formatBullet(revisionDesc.percent, ready.Status), revisionHeader(revisionDesc))
if ready.Status == corev1.ConditionFalse {
section.WriteAttribute("Error", ready.Reason)
}
revision.WriteImage(section, revisionDesc.revision)
revision.WriteReplicas(section, revisionDesc.revision)
if printDetails {
revision.WritePort(section, revisionDesc.revision)
revision.WriteEnv(section, revisionDesc.revision, printDetails)
revision.WriteEnvFrom(section, revisionDesc.revision, printDetails)
revision.WriteScale(section, revisionDesc.revision)
revision.WriteConcurrencyOptions(section, revisionDesc.revision)
revision.WriteResources(section, revisionDesc.revision)
}
}
}
// ======================================================================================
// Helper functions
// Format the revision name along with its generation. Use colors if enabled.
func revisionHeader(desc *revisionDesc) string {
header := desc.revision.Name
if desc.latestTraffic != nil && *desc.latestTraffic {
header = fmt.Sprintf("@latest (%s)", desc.revision.Name)
} else if desc.latestReady {
header = desc.revision.Name + " (current @latest)"
} else if desc.latestCreated {
header = desc.revision.Name + " (latest created)"
}
if desc.tag != "" {
header = fmt.Sprintf("%s #%s", header, desc.tag)
}
return header + " " +
"[" + strconv.Itoa(desc.configurationGeneration) + "]" +
" " +
"(" + commands.Age(desc.revision.CreationTimestamp.Time) + ")"
}
// Format target percentage that it fits in the revision table
func formatBullet(percentage int64, status corev1.ConditionStatus) string {
symbol := "+"
switch status {
case corev1.ConditionTrue:
if percentage > 0 {
symbol = "%"
}
case corev1.ConditionFalse:
symbol = "!"
default:
symbol = "?"
}
if percentage == 0 {
return fmt.Sprintf(" %s", symbol)
}
return fmt.Sprintf("%3d%s", percentage, symbol)
}
// Call the backend to query revisions for the given service and build up
// the view objects used for output
func getRevisionDescriptions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, withDetails bool) ([]*revisionDesc, error) {
revisionsSeen := sets.NewString()
revisionDescs := []*revisionDesc{}
trafficTargets := service.Status.Traffic
var err error
for i := range trafficTargets {
target := trafficTargets[i]
revision, err := extractRevisionFromTarget(ctx, client, target)
if err != nil {
return nil, fmt.Errorf("cannot extract revision from service %s: %w", service.Name, err)
}
revisionsSeen.Insert(revision.Name)
desc, err := newRevisionDesc(*revision, &target, service)
if err != nil {
return nil, err
}
revisionDescs = append(revisionDescs, desc)
}
if revisionDescs, err = completeWithLatestRevisions(ctx, client, service, revisionsSeen, revisionDescs); err != nil {
return nil, err
}
if withDetails {
if revisionDescs, err = completeWithUntargetedRevisions(ctx, client, service, revisionsSeen, revisionDescs); err != nil {
return nil, err
}
}
return orderByConfigurationGeneration(revisionDescs), nil
}
// Order the list of revisions so that the newest revisions are at the top
func orderByConfigurationGeneration(descs []*revisionDesc) []*revisionDesc {
sort.SliceStable(descs, func(i, j int) bool {
return descs[i].configurationGeneration > descs[j].configurationGeneration
})
return descs
}
func completeWithLatestRevisions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, revisionsSeen sets.String, descs []*revisionDesc) ([]*revisionDesc, error) {
for _, revisionName := range []string{service.Status.LatestCreatedRevisionName, service.Status.LatestReadyRevisionName} {
if revisionName == "" || revisionsSeen.Has(revisionName) {
continue
}
revisionsSeen.Insert(revisionName)
rev, err := client.GetRevision(ctx, revisionName)
if err != nil {
return nil, err
}
newDesc, err := newRevisionDesc(*rev, nil, service)
if err != nil {
return nil, err
}
descs = append(descs, newDesc)
}
return descs, nil
}
func completeWithUntargetedRevisions(ctx context.Context, client clientservingv1.KnServingClient, service *servingv1.Service, revisionsSeen sets.String, descs []*revisionDesc) ([]*revisionDesc, error) {
revisions, err := client.ListRevisions(ctx, clientservingv1.WithService(service.Name))
if err != nil {
return nil, err
}
for _, revision := range revisions.Items {
if revisionsSeen.Has(revision.Name) {
continue
}
revisionsSeen.Insert(revision.Name)
newDesc, err := newRevisionDesc(revision, nil, service)
if err != nil {
return nil, err
}
descs = append(descs, newDesc)
}
return descs, nil
}
func newRevisionDesc(revision servingv1.Revision, target *servingv1.TrafficTarget, service *servingv1.Service) (*revisionDesc, error) {
generation, err := strconv.ParseInt(revision.Labels[serving.ConfigurationGenerationLabelKey], 0, 0)
if err != nil {
return nil, fmt.Errorf("cannot extract configuration generation for revision %s: %w", revision.Name, err)
}
revisionDesc := revisionDesc{
revision: &revision,
configurationGeneration: int(generation),
latestCreated: revision.Name == service.Status.LatestCreatedRevisionName,
latestReady: revision.Name == service.Status.LatestReadyRevisionName,
}
addTargetInfo(&revisionDesc, target)
if err != nil {
return nil, err
}
return &revisionDesc, nil
}
func addTargetInfo(desc *revisionDesc, target *servingv1.TrafficTarget) {
if target != nil {
if target.Percent != nil {
desc.percent = *target.Percent
}
desc.latestTraffic = target.LatestRevision
desc.tag = target.Tag
}
}
func extractRevisionFromTarget(ctx context.Context, client clientservingv1.KnServingClient, target servingv1.TrafficTarget) (*servingv1.Revision, error) {
var revisionName = target.RevisionName
if revisionName == "" {
configurationName := target.ConfigurationName
if configurationName == "" {
return nil, fmt.Errorf("neither RevisionName nor ConfigurationName set")
}
configuration, err := client.GetConfiguration(ctx, configurationName)
if err != nil {
return nil, err
}
revisionName = configuration.Status.LatestCreatedRevisionName
}
return client.GetRevision(ctx, revisionName)
}
func e | xtractURL( | identifier_name |
|
update.go | "k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
)
// UpdateResource returns a function that will handle a resource update
func | (r rest.Updater, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Update", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
// timeout inside the parent context is lower than requestTimeoutUpperBound.
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
defer cancel()
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Update)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
options := &metav1.UpdateOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
if errs := validation.ValidateUpdateOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "UpdateOptions"}, "", errs)
scope.err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)
if err != nil {
scope.err(err, w, req)
return
}
defaultGVK := scope.Kind
original := r.New()
validationDirective := fieldValidation(options.FieldValidation)
decodeSerializer := s.Serializer
if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
decodeSerializer = s.StrictSerializer
}
decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion)
span.AddEvent("About to convert to expected version")
obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
if err != nil {
strictError, isStrictError := runtime.AsStrictDecodingError(err)
switch {
case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn:
addStrictDecodingWarnings(req.Context(), strictError.Errors())
case isStrictError && validationDirective == metav1.FieldValidationIgnore:
klog.Warningf("unexpected strict error when field validation is set to ignore")
fallthrough
default:
err = transformDecodeError(scope.Typer, err, original, gvk, body)
scope.err(err, w, req)
return
}
}
objGV := gvk.GroupVersion()
if !scope.AcceptsGroupVersion(objGV) {
err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", objGV, defaultGVK.GroupVersion()))
scope.err(err, w, req)
return
}
span.AddEvent("Conversion done")
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)
admit = admission.WithAudit(admit)
// if this object supports namespace info
if objectMeta, err := meta.Accessor(obj); err == nil {
// ensure namespace on the object is correct, or error if a conflicting namespace was set in the object
if err := rest.EnsureObjectNamespaceMatchesRequestNamespace(rest.ExpectedNamespaceForResource(namespace, scope.Resource), objectMeta); err != nil {
scope.err(err, w, req)
return
}
}
if err := checkName(obj, name, namespace, scope.Namer); err != nil {
scope.err(err, w, req)
return
}
userInfo, _ := request.UserFrom(ctx)
transformers := []rest.TransformFunc{}
// allows skipping managedFields update if the resulting object is too big
shouldUpdateManagedFields := true
admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)
transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) {
if shouldUpdateManagedFields {
return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil
}
return newObj, nil
})
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok {
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
isNotZeroObject, err := hasUID(oldObj)
if err != nil {
return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error())
} else if !isNotZeroObject {
if mutatingAdmission.Handles(admission.Create) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
} else {
if mutatingAdmission.Handles(admission.Update) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
}
return newObj, nil
})
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
// Dedup owner references again after mutating admission happens
dedupOwnerReferencesAndAddWarning(newObj, req.Context(), true)
return newObj, nil
})
}
createAuthorizerAttributes := authorizer.AttributesRecord{
User: userInfo,
ResourceRequest: true,
Path: req.URL.Path,
Verb: "create",
APIGroup: scope.Resource.Group,
APIVersion: scope.Resource.Version,
Resource: scope.Resource.Resource,
Subresource: scope.Subresource,
Namespace: namespace,
Name: name,
}
span.AddEvent("About to store object in database")
wasCreated := false
requestFunc := func() (runtime.Object, error) {
obj, created, err := r.Update(
ctx,
name,
rest.DefaultUpdatedObjectInfo(obj, transformers...),
withAuthorization(rest.AdmissionToValidateObjectFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope),
scope.Authorizer, createAuthorizerAttributes),
rest.AdmissionToValidateObjectUpdateFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope),
false,
options,
)
wasCreated = created
return obj, err
}
// Dedup owner references before updating managed fields
dedupOwnerReferencesAndAddWarning(obj, req.Context(), false)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
result, err := requestFunc()
// If the object wasn't committed to storage because it's serialized size was too large,
// it is safe to remove managedFields (which can be large) and try again.
if isTooLargeError(err) {
if accessor, accessorErr := meta.Access | UpdateResource | identifier_name |
update.go | "k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
)
// UpdateResource returns a function that will handle a resource update
func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Update", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
// timeout inside the parent context is lower than requestTimeoutUpperBound.
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
defer cancel()
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Update)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
options := &metav1.UpdateOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
if errs := validation.ValidateUpdateOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "UpdateOptions"}, "", errs)
scope.err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)
if err != nil {
scope.err(err, w, req)
return
}
defaultGVK := scope.Kind
original := r.New()
validationDirective := fieldValidation(options.FieldValidation)
decodeSerializer := s.Serializer
if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
decodeSerializer = s.StrictSerializer
}
decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion)
span.AddEvent("About to convert to expected version")
obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
if err != nil {
strictError, isStrictError := runtime.AsStrictDecodingError(err)
switch {
case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn:
addStrictDecodingWarnings(req.Context(), strictError.Errors())
case isStrictError && validationDirective == metav1.FieldValidationIgnore:
klog.Warningf("unexpected strict error when field validation is set to ignore")
fallthrough
default:
err = transformDecodeError(scope.Typer, err, original, gvk, body)
scope.err(err, w, req)
return
}
}
objGV := gvk.GroupVersion()
if !scope.AcceptsGroupVersion(objGV) {
err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", objGV, defaultGVK.GroupVersion()))
scope.err(err, w, req)
return
}
span.AddEvent("Conversion done")
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)
admit = admission.WithAudit(admit)
// if this object supports namespace info | return
}
}
if err := checkName(obj, name, namespace, scope.Namer); err != nil {
scope.err(err, w, req)
return
}
userInfo, _ := request.UserFrom(ctx)
transformers := []rest.TransformFunc{}
// allows skipping managedFields update if the resulting object is too big
shouldUpdateManagedFields := true
admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)
transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) {
if shouldUpdateManagedFields {
return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil
}
return newObj, nil
})
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok {
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
isNotZeroObject, err := hasUID(oldObj)
if err != nil {
return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error())
} else if !isNotZeroObject {
if mutatingAdmission.Handles(admission.Create) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
} else {
if mutatingAdmission.Handles(admission.Update) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
}
return newObj, nil
})
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
// Dedup owner references again after mutating admission happens
dedupOwnerReferencesAndAddWarning(newObj, req.Context(), true)
return newObj, nil
})
}
createAuthorizerAttributes := authorizer.AttributesRecord{
User: userInfo,
ResourceRequest: true,
Path: req.URL.Path,
Verb: "create",
APIGroup: scope.Resource.Group,
APIVersion: scope.Resource.Version,
Resource: scope.Resource.Resource,
Subresource: scope.Subresource,
Namespace: namespace,
Name: name,
}
span.AddEvent("About to store object in database")
wasCreated := false
requestFunc := func() (runtime.Object, error) {
obj, created, err := r.Update(
ctx,
name,
rest.DefaultUpdatedObjectInfo(obj, transformers...),
withAuthorization(rest.AdmissionToValidateObjectFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope),
scope.Authorizer, createAuthorizerAttributes),
rest.AdmissionToValidateObjectUpdateFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope),
false,
options,
)
wasCreated = created
return obj, err
}
// Dedup owner references before updating managed fields
dedupOwnerReferencesAndAddWarning(obj, req.Context(), false)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
result, err := requestFunc()
// If the object wasn't committed to storage because it's serialized size was too large,
// it is safe to remove managedFields (which can be large) and try again.
if isTooLargeError(err) {
if accessor, accessorErr := meta.Accessor(obj | if objectMeta, err := meta.Accessor(obj); err == nil {
// ensure namespace on the object is correct, or error if a conflicting namespace was set in the object
if err := rest.EnsureObjectNamespaceMatchesRequestNamespace(rest.ExpectedNamespaceForResource(namespace, scope.Resource), objectMeta); err != nil {
scope.err(err, w, req) | random_line_split |
update.go | "
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
)
// UpdateResource returns a function that will handle a resource update
func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Update", traceFields(req)...)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
if err != nil {
scope.err(err, w, req)
return
}
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
// timeout inside the parent context is lower than requestTimeoutUpperBound.
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
defer cancel()
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Update)
if err != nil {
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
options := &metav1.UpdateOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
scope.err(err, w, req)
return
}
if errs := validation.ValidateUpdateOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "UpdateOptions"}, "", errs)
scope.err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)
if err != nil {
scope.err(err, w, req)
return
}
defaultGVK := scope.Kind
original := r.New()
validationDirective := fieldValidation(options.FieldValidation)
decodeSerializer := s.Serializer
if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
decodeSerializer = s.StrictSerializer
}
decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion)
span.AddEvent("About to convert to expected version")
obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
if err != nil {
strictError, isStrictError := runtime.AsStrictDecodingError(err)
switch {
case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn:
addStrictDecodingWarnings(req.Context(), strictError.Errors())
case isStrictError && validationDirective == metav1.FieldValidationIgnore:
klog.Warningf("unexpected strict error when field validation is set to ignore")
fallthrough
default:
err = transformDecodeError(scope.Typer, err, original, gvk, body)
scope.err(err, w, req)
return
}
}
objGV := gvk.GroupVersion()
if !scope.AcceptsGroupVersion(objGV) {
err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", objGV, defaultGVK.GroupVersion()))
scope.err(err, w, req)
return
}
span.AddEvent("Conversion done")
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)
admit = admission.WithAudit(admit)
// if this object supports namespace info
if objectMeta, err := meta.Accessor(obj); err == nil {
// ensure namespace on the object is correct, or error if a conflicting namespace was set in the object
if err := rest.EnsureObjectNamespaceMatchesRequestNamespace(rest.ExpectedNamespaceForResource(namespace, scope.Resource), objectMeta); err != nil {
scope.err(err, w, req)
return
}
}
if err := checkName(obj, name, namespace, scope.Namer); err != nil {
scope.err(err, w, req)
return
}
userInfo, _ := request.UserFrom(ctx)
transformers := []rest.TransformFunc{}
// allows skipping managedFields update if the resulting object is too big
shouldUpdateManagedFields := true
admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)
transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) {
if shouldUpdateManagedFields {
return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil
}
return newObj, nil
})
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok {
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
isNotZeroObject, err := hasUID(oldObj)
if err != nil {
return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error())
} else if !isNotZeroObject {
if mutatingAdmission.Handles(admission.Create) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
} else {
if mutatingAdmission.Handles(admission.Update) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
}
return newObj, nil
})
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
// Dedup owner references again after mutating admission happens
dedupOwnerReferencesAndAddWarning(newObj, req.Context(), true)
return newObj, nil
})
}
createAuthorizerAttributes := authorizer.AttributesRecord{
User: userInfo,
ResourceRequest: true,
Path: req.URL.Path,
Verb: "create",
APIGroup: scope.Resource.Group,
APIVersion: scope.Resource.Version,
Resource: scope.Resource.Resource,
Subresource: scope.Subresource,
Namespace: namespace,
Name: name,
}
span.AddEvent("About to store object in database")
wasCreated := false
requestFunc := func() (runtime.Object, error) {
obj, created, err := r.Update(
ctx,
name,
rest.DefaultUpdatedObjectInfo(obj, transformers...),
withAuthorization(rest.AdmissionToValidateObjectFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope),
scope.Authorizer, createAuthorizerAttributes),
rest.AdmissionToValidateObjectUpdateFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope),
false,
options,
)
wasCreated = created
return obj, err
}
// Dedup owner references before updating managed fields
dedupOwnerReferencesAndAddWarning(obj, req.Context(), false)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
result, err := requestFunc()
// If the object wasn't committed to storage because it's serialized size was too large,
// it is safe to remove managedFields (which can be large) and try again.
if isTooLargeError(err) | {
if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {
accessor.SetManagedFields(nil)
shouldUpdateManagedFields = false
result, err = requestFunc()
}
} | conditional_block |
|
update.go | .err(err, w, req)
return
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)
if err != nil {
scope.err(err, w, req)
return
}
defaultGVK := scope.Kind
original := r.New()
validationDirective := fieldValidation(options.FieldValidation)
decodeSerializer := s.Serializer
if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
decodeSerializer = s.StrictSerializer
}
decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion)
span.AddEvent("About to convert to expected version")
obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
if err != nil {
strictError, isStrictError := runtime.AsStrictDecodingError(err)
switch {
case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn:
addStrictDecodingWarnings(req.Context(), strictError.Errors())
case isStrictError && validationDirective == metav1.FieldValidationIgnore:
klog.Warningf("unexpected strict error when field validation is set to ignore")
fallthrough
default:
err = transformDecodeError(scope.Typer, err, original, gvk, body)
scope.err(err, w, req)
return
}
}
objGV := gvk.GroupVersion()
if !scope.AcceptsGroupVersion(objGV) {
err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", objGV, defaultGVK.GroupVersion()))
scope.err(err, w, req)
return
}
span.AddEvent("Conversion done")
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)
admit = admission.WithAudit(admit)
// if this object supports namespace info
if objectMeta, err := meta.Accessor(obj); err == nil {
// ensure namespace on the object is correct, or error if a conflicting namespace was set in the object
if err := rest.EnsureObjectNamespaceMatchesRequestNamespace(rest.ExpectedNamespaceForResource(namespace, scope.Resource), objectMeta); err != nil {
scope.err(err, w, req)
return
}
}
if err := checkName(obj, name, namespace, scope.Namer); err != nil {
scope.err(err, w, req)
return
}
userInfo, _ := request.UserFrom(ctx)
transformers := []rest.TransformFunc{}
// allows skipping managedFields update if the resulting object is too big
shouldUpdateManagedFields := true
admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)
transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) {
if shouldUpdateManagedFields {
return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil
}
return newObj, nil
})
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok {
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
isNotZeroObject, err := hasUID(oldObj)
if err != nil {
return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error())
} else if !isNotZeroObject {
if mutatingAdmission.Handles(admission.Create) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
} else {
if mutatingAdmission.Handles(admission.Update) {
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope)
}
}
return newObj, nil
})
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
// Dedup owner references again after mutating admission happens
dedupOwnerReferencesAndAddWarning(newObj, req.Context(), true)
return newObj, nil
})
}
createAuthorizerAttributes := authorizer.AttributesRecord{
User: userInfo,
ResourceRequest: true,
Path: req.URL.Path,
Verb: "create",
APIGroup: scope.Resource.Group,
APIVersion: scope.Resource.Version,
Resource: scope.Resource.Resource,
Subresource: scope.Subresource,
Namespace: namespace,
Name: name,
}
span.AddEvent("About to store object in database")
wasCreated := false
requestFunc := func() (runtime.Object, error) {
obj, created, err := r.Update(
ctx,
name,
rest.DefaultUpdatedObjectInfo(obj, transformers...),
withAuthorization(rest.AdmissionToValidateObjectFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope),
scope.Authorizer, createAuthorizerAttributes),
rest.AdmissionToValidateObjectUpdateFunc(
admit,
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope),
false,
options,
)
wasCreated = created
return obj, err
}
// Dedup owner references before updating managed fields
dedupOwnerReferencesAndAddWarning(obj, req.Context(), false)
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
result, err := requestFunc()
// If the object wasn't committed to storage because it's serialized size was too large,
// it is safe to remove managedFields (which can be large) and try again.
if isTooLargeError(err) {
if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {
accessor.SetManagedFields(nil)
shouldUpdateManagedFields = false
result, err = requestFunc()
}
}
return result, err
})
if err != nil {
span.AddEvent("Write to database call failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
scope.err(err, w, req)
return
}
span.AddEvent("Write to database call succeeded", attribute.Int("len", len(body)))
status := http.StatusOK
if wasCreated {
status = http.StatusCreated
}
span.AddEvent("About to write a response")
defer span.AddEvent("Writing http response done")
transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
}
}
func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer, attributes authorizer.Attributes) rest.ValidateObjectFunc {
var once sync.Once
var authorizerDecision authorizer.Decision
var authorizerReason string
var authorizerErr error
return func(ctx context.Context, obj runtime.Object) error {
if a == nil {
return errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize a create on update"))
}
once.Do(func() {
authorizerDecision, authorizerReason, authorizerErr = a.Authorize(ctx, attributes)
})
// an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here.
if authorizerDecision == authorizer.DecisionAllow {
// Continue to validating admission
return validate(ctx, obj)
}
if authorizerErr != nil {
return errors.NewInternalError(authorizerErr)
}
// The user is not authorized to perform this action, so we need to build the error response
gr := schema.GroupResource{
Group: attributes.GetAPIGroup(),
Resource: attributes.GetResource(),
}
name := attributes.GetName()
err := fmt.Errorf("%v", authorizerReason)
return errors.NewForbidden(gr, name, err)
}
}
// updateToCreateOptions creates a CreateOptions with the same field values as the provided UpdateOptions.
func updateToCreateOptions(uo *metav1.UpdateOptions) *metav1.CreateOptions | {
if uo == nil {
return nil
}
co := &metav1.CreateOptions{
DryRun: uo.DryRun,
FieldManager: uo.FieldManager,
FieldValidation: uo.FieldValidation,
}
co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))
return co
} | identifier_body |
|
plugin.go | for containers
var envDefault []core_v1.EnvVar
// ASAP public key servers
// we want every container to know where to get the public keys
// regardless if they're using ASAP or not
envDefault = append(envDefault, asapkey.GetPublicKeyRepoEnvVars(context.StateContext.Location)...)
// always bind to the common secret, it's OK if it doesn't exist
trueVar := true
commonEnvFrom := core_v1.EnvFromSource{
SecretRef: &core_v1.SecretEnvSource{
LocalObjectReference: core_v1.LocalObjectReference{
Name: apik8scompute.CommonSecretName,
},
Optional: &trueVar,
},
}
envFrom = append(envFrom, commonEnvFrom)
// prepare containers
containers := buildContainers(spec, envDefault, envFrom)
labelMap := map[string]string{
stateNameLabel: context.StateMeta.Name,
resourceNameLabel: string(resource.Name),
}
podSpec := buildPodSpec(containers, serviceAccountNameRef.Ref())
// The kube deployment object spec
deploymentSpec := buildDeploymentSpec(context, spec, podSpec, labelMap, iamRoleRef)
// The final wired deployment object
deployment := smith_v1.Resource{
Name: wiringutil.ResourceName(resource.Name),
References: references,
Spec: smith_v1.ResourceSpec{
Object: &apps_v1.Deployment{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.DeploymentKind,
APIVersion: apps_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaName(resource.Name),
},
Spec: deploymentSpec,
},
},
}
smithResources = append(smithResources, deployment)
deploymentNameRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(deployment.Name, metadataElement, nameElement),
Resource: deployment.Name,
Path: metadataNamePath,
}
// 0 value for replicas indicates we don't need an HPA
if spec.Scaling.MinReplicas > 0 && spec.Scaling.MaxReplicas > 0 {
hpaSpec := buildHorizontalPodAutoscalerSpec(spec, deploymentNameRef.Ref())
// The final wired HPA object
hpa := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(resource.Name, hpaPostfix),
References: []smith_v1.Reference{deploymentNameRef},
Spec: smith_v1.ResourceSpec{
Object: &autoscaling_v2b1.HorizontalPodAutoscaler{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.HorizontalPodAutoscalerKind,
APIVersion: autoscaling_v2b1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaNameWithPostfix(resource.Name, hpaPostfix),
},
Spec: hpaSpec,
},
},
}
smithResources = append(smithResources, hpa)
}
result := &wiringplugin.WiringResult{
Contract: wiringplugin.ResourceContract{
Shapes: []wiringplugin.Shape{
knownshapes.NewSetOfPodsSelectableByLabels(deployment.Name, labelMap),
},
},
Resources: smithResources,
}
return result, false, nil
}
func generateSecretResource(compute voyager.ResourceName, envVars map[string]string, dependencyReferences []smith_v1.Reference) (smith_v1.Resource, error) {
secretData := make(map[string][]byte, len(envVars))
for key, val := range envVars {
secretData[key] = []byte(val)
}
secretPluginSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secretplugin.Spec{
Data: secretData,
})
if err != nil {
return smith_v1.Resource{}, errors.WithStack(err)
}
instanceResource := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(compute, secretPluginNamePostfix),
References: dependencyReferences,
Spec: smith_v1.ResourceSpec{
Plugin: &smith_v1.PluginSpec{
Name: secretplugin.PluginName,
ObjectName: wiringutil.MetaNameWithPostfix(compute, secretPluginNamePostfix),
Spec: secretPluginSpec,
},
},
}
return instanceResource, nil
}
func generateSecretEnvVarsResource(compute voyager.ResourceName, renameEnvVar map[string]string, dependencyReferences []smith_v1.Reference) (smith_v1.Resource, error) {
secretEnvVarPluginSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secretenvvar.PodSpec{
IgnoreKeyRegex: envVarIgnoreRegex,
RenameEnvVar: renameEnvVar,
})
if err != nil {
return smith_v1.Resource{}, err
}
// We use objectName for both the smith resource name and the kubernetes metadata name,
// since there's only one of these per state resource (no possibility of clash).
instanceResource := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(compute, podSecretEnvVarNamePostfix),
References: dependencyReferences,
Spec: smith_v1.ResourceSpec{
Plugin: &smith_v1.PluginSpec{
Name: podSecretEnvVarPluginTypeName,
ObjectName: wiringutil.MetaNameWithPostfix(compute, podSecretEnvVarNamePostfix),
Spec: secretEnvVarPluginSpec,
},
},
}
return instanceResource, nil
}
func buildPodSpec(containers []core_v1.Container, serviceAccountName string) core_v1.PodSpec {
var terminationGracePeriodSeconds int64 = 30
return core_v1.PodSpec{
Containers: containers,
ServiceAccountName: serviceAccountName,
// field with default values
DNSPolicy: "ClusterFirst",
RestartPolicy: "Always",
SchedulerName: "default-scheduler",
SecurityContext: &core_v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
}
}
func buildDeploymentSpec(context *wiringplugin.WiringContext, spec *Spec, podSpec core_v1.PodSpec, labelMap map[string]string, iamRoleRef *smith_v1.Reference) apps_v1.DeploymentSpec {
progressDeadlineSeconds := int32(600)
revisionHistoryLimit := int32(0)
annotations := map[string]string{
kittBusinessUnitAnnotation: context.StateContext.ServiceProperties.BusinessUnit,
loggingIDAnnotation: context.StateContext.ServiceProperties.LoggingID,
resourceOwnerAnnotation: context.StateContext.ServiceProperties.ResourceOwner,
}
if iamRoleRef != nil {
annotations[kube2iamAnnotation] = iamRoleRef.Ref()
}
// Set replicas to the scaling min, ensure there is at least 1 replica
replicas := spec.Scaling.MinReplicas
if replicas == 0 {
replicas = 1
}
return apps_v1.DeploymentSpec{
Selector: &meta_v1.LabelSelector{
MatchLabels: labelMap,
},
Replicas: &replicas,
Template: core_v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Labels: labelMap,
Annotations: annotations,
},
Spec: podSpec,
},
// fields which default values
Strategy: apps_v1.DeploymentStrategy{
Type: "RollingUpdate",
RollingUpdate: &apps_v1.RollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{
Type: intstr.String,
StrVal: "25%",
},
MaxSurge: &intstr.IntOrString{
Type: intstr.String,
StrVal: "25%",
},
},
},
ProgressDeadlineSeconds: &progressDeadlineSeconds,
RevisionHistoryLimit: &revisionHistoryLimit,
}
}
func buildContainers(spec *Spec, envDefault []core_v1.EnvVar, envFrom []core_v1.EnvFromSource) []core_v1.Container {
containers := make([]core_v1.Container, 0, len(spec.Containers))
for _, container := range spec.Containers {
containers = append(containers, container.ToKubeContainer(envDefault, envFrom))
}
return containers
}
func buildHorizontalPodAutoscalerSpec(spec *Spec, deploymentName string) autoscaling_v2b1.HorizontalPodAutoscalerSpec {
metrics := make([]autoscaling_v2b1.MetricSpec, len(spec.Scaling.Metrics))
for i, m := range spec.Scaling.Metrics {
metrics[i] = m.ToKubeMetric()
}
return autoscaling_v2b1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling_v2b1.CrossVersionObjectReference{
APIVersion: apps_v1.SchemeGroupVersion.String(),
Kind: k8s.DeploymentKind,
Name: deploymentName,
},
MinReplicas: &spec.Scaling.MinReplicas,
MaxReplicas: spec.Scaling.MaxReplicas,
Metrics: metrics,
}
}
func | buildKube2iamRoles | identifier_name |
|
plugin.go | Ref)
// default env vars for containers
var envDefault []core_v1.EnvVar
// ASAP public key servers
// we want every container to know where to get the public keys
// regardless if they're using ASAP or not
envDefault = append(envDefault, asapkey.GetPublicKeyRepoEnvVars(context.StateContext.Location)...)
// always bind to the common secret, it's OK if it doesn't exist
trueVar := true
commonEnvFrom := core_v1.EnvFromSource{
SecretRef: &core_v1.SecretEnvSource{
LocalObjectReference: core_v1.LocalObjectReference{
Name: apik8scompute.CommonSecretName,
},
Optional: &trueVar,
},
}
envFrom = append(envFrom, commonEnvFrom)
// prepare containers
containers := buildContainers(spec, envDefault, envFrom)
labelMap := map[string]string{
stateNameLabel: context.StateMeta.Name,
resourceNameLabel: string(resource.Name),
}
podSpec := buildPodSpec(containers, serviceAccountNameRef.Ref())
// The kube deployment object spec
deploymentSpec := buildDeploymentSpec(context, spec, podSpec, labelMap, iamRoleRef)
// The final wired deployment object
deployment := smith_v1.Resource{
Name: wiringutil.ResourceName(resource.Name),
References: references,
Spec: smith_v1.ResourceSpec{
Object: &apps_v1.Deployment{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.DeploymentKind,
APIVersion: apps_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaName(resource.Name),
},
Spec: deploymentSpec,
},
},
}
smithResources = append(smithResources, deployment)
deploymentNameRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(deployment.Name, metadataElement, nameElement),
Resource: deployment.Name,
Path: metadataNamePath,
}
// 0 value for replicas indicates we don't need an HPA
if spec.Scaling.MinReplicas > 0 && spec.Scaling.MaxReplicas > 0 {
hpaSpec := buildHorizontalPodAutoscalerSpec(spec, deploymentNameRef.Ref())
// The final wired HPA object
hpa := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(resource.Name, hpaPostfix),
References: []smith_v1.Reference{deploymentNameRef},
Spec: smith_v1.ResourceSpec{
Object: &autoscaling_v2b1.HorizontalPodAutoscaler{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.HorizontalPodAutoscalerKind,
APIVersion: autoscaling_v2b1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaNameWithPostfix(resource.Name, hpaPostfix),
},
Spec: hpaSpec,
},
},
}
smithResources = append(smithResources, hpa)
}
result := &wiringplugin.WiringResult{
Contract: wiringplugin.ResourceContract{
Shapes: []wiringplugin.Shape{
knownshapes.NewSetOfPodsSelectableByLabels(deployment.Name, labelMap),
},
},
Resources: smithResources,
}
return result, false, nil
}
func generateSecretResource(compute voyager.ResourceName, envVars map[string]string, dependencyReferences []smith_v1.Reference) (smith_v1.Resource, error) {
secretData := make(map[string][]byte, len(envVars))
for key, val := range envVars {
secretData[key] = []byte(val)
}
secretPluginSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secretplugin.Spec{
Data: secretData,
})
if err != nil {
return smith_v1.Resource{}, errors.WithStack(err)
}
instanceResource := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(compute, secretPluginNamePostfix),
References: dependencyReferences,
Spec: smith_v1.ResourceSpec{
Plugin: &smith_v1.PluginSpec{
Name: secretplugin.PluginName,
ObjectName: wiringutil.MetaNameWithPostfix(compute, secretPluginNamePostfix),
Spec: secretPluginSpec,
},
},
}
return instanceResource, nil
}
func generateSecretEnvVarsResource(compute voyager.ResourceName, renameEnvVar map[string]string, dependencyReferences []smith_v1.Reference) (smith_v1.Resource, error) {
secretEnvVarPluginSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secretenvvar.PodSpec{
IgnoreKeyRegex: envVarIgnoreRegex,
RenameEnvVar: renameEnvVar,
})
if err != nil {
return smith_v1.Resource{}, err
}
// We use objectName for both the smith resource name and the kubernetes metadata name,
// since there's only one of these per state resource (no possibility of clash).
instanceResource := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(compute, podSecretEnvVarNamePostfix),
References: dependencyReferences,
Spec: smith_v1.ResourceSpec{
Plugin: &smith_v1.PluginSpec{
Name: podSecretEnvVarPluginTypeName,
ObjectName: wiringutil.MetaNameWithPostfix(compute, podSecretEnvVarNamePostfix),
Spec: secretEnvVarPluginSpec,
},
},
}
return instanceResource, nil
}
func buildPodSpec(containers []core_v1.Container, serviceAccountName string) core_v1.PodSpec {
var terminationGracePeriodSeconds int64 = 30
return core_v1.PodSpec{
Containers: containers,
ServiceAccountName: serviceAccountName,
// field with default values
DNSPolicy: "ClusterFirst",
RestartPolicy: "Always",
SchedulerName: "default-scheduler",
SecurityContext: &core_v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
}
}
func buildDeploymentSpec(context *wiringplugin.WiringContext, spec *Spec, podSpec core_v1.PodSpec, labelMap map[string]string, iamRoleRef *smith_v1.Reference) apps_v1.DeploymentSpec {
progressDeadlineSeconds := int32(600)
revisionHistoryLimit := int32(0)
annotations := map[string]string{
kittBusinessUnitAnnotation: context.StateContext.ServiceProperties.BusinessUnit,
loggingIDAnnotation: context.StateContext.ServiceProperties.LoggingID,
resourceOwnerAnnotation: context.StateContext.ServiceProperties.ResourceOwner,
}
if iamRoleRef != nil {
annotations[kube2iamAnnotation] = iamRoleRef.Ref()
}
// Set replicas to the scaling min, ensure there is at least 1 replica
replicas := spec.Scaling.MinReplicas
if replicas == 0 {
replicas = 1
}
return apps_v1.DeploymentSpec{
Selector: &meta_v1.LabelSelector{
MatchLabels: labelMap,
},
Replicas: &replicas,
Template: core_v1.PodTemplateSpec{
ObjectMeta: meta_v1.ObjectMeta{
Labels: labelMap,
Annotations: annotations,
},
Spec: podSpec,
},
// fields which default values
Strategy: apps_v1.DeploymentStrategy{
Type: "RollingUpdate",
RollingUpdate: &apps_v1.RollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{
Type: intstr.String,
StrVal: "25%",
},
MaxSurge: &intstr.IntOrString{
Type: intstr.String,
StrVal: "25%",
},
},
},
ProgressDeadlineSeconds: &progressDeadlineSeconds,
RevisionHistoryLimit: &revisionHistoryLimit,
}
}
func buildContainers(spec *Spec, envDefault []core_v1.EnvVar, envFrom []core_v1.EnvFromSource) []core_v1.Container {
containers := make([]core_v1.Container, 0, len(spec.Containers))
for _, container := range spec.Containers {
containers = append(containers, container.ToKubeContainer(envDefault, envFrom))
}
return containers
}
func buildHorizontalPodAutoscalerSpec(spec *Spec, deploymentName string) autoscaling_v2b1.HorizontalPodAutoscalerSpec | {
metrics := make([]autoscaling_v2b1.MetricSpec, len(spec.Scaling.Metrics))
for i, m := range spec.Scaling.Metrics {
metrics[i] = m.ToKubeMetric()
}
return autoscaling_v2b1.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling_v2b1.CrossVersionObjectReference{
APIVersion: apps_v1.SchemeGroupVersion.String(),
Kind: k8s.DeploymentKind,
Name: deploymentName,
},
MinReplicas: &spec.Scaling.MinReplicas,
MaxReplicas: spec.Scaling.MaxReplicas,
Metrics: metrics,
}
} | identifier_body |
|
plugin.go | nil
}
func validateScaling(s Scaling) error {
// we only need to validate when min & max are both positive;
// if one or more are 0, it means we provision a deployment with no HPA
if s.MinReplicas > 0 &&
s.MaxReplicas > 0 &&
s.MinReplicas > s.MaxReplicas {
return errors.Errorf("MaxReplicas (%d) must be greater than MinReplicas (%d)", s.MaxReplicas, s.MinReplicas)
}
return nil
}
// WireUp is the main autowiring function for the K8SCompute resource, building a native kube deployment and HPA
func WireUp(resource *orch_v1.StateResource, context *wiringplugin.WiringContext) (*wiringplugin.WiringResult, bool /*retriable*/, error) {
if resource.Type != apik8scompute.ResourceType {
return nil, false, errors.Errorf("invalid resource type: %q", resource.Type)
}
// Validate ASAP dependencies
asapDependencyCount := 0
for _, dep := range context.Dependencies {
if dep.Type == asapkey.ResourceType {
// Only allow one asap key dependency per compute
// so we can use same micros1 env var names and facilitate migration
if asapDependencyCount++; asapDependencyCount > 1 {
return nil, false, errors.Errorf("cannot depend on more than one asap key resource")
}
}
}
// Parse spec and apply defaults
spec := &Spec{}
if err := resource.SpecIntoTyped(spec); err != nil {
return nil, false, err
}
// Apply the defaults from the resource state
// Defaults are defined in the formation layer
if err := spec.ApplyDefaults(resource.Defaults); err != nil {
return nil, false, err
}
if err := validateSpec(spec); err != nil {
return nil, false, err
}
// Prepare environment variables
var envFrom []core_v1.EnvFromSource
var smithResources []smith_v1.Resource
var bindingResult []compute.BindingResult
references := make([]smith_v1.Reference, 0, len(context.Dependencies))
for _, dep := range context.Dependencies {
bindableShape, found, err := knownshapes.FindBindableEnvironmentVariablesShape(dep.Contract.Shapes)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, errors.Errorf("cannot depend on resource %q of type %q, only dependencies providing shape %q are supported", dep.Name, dep.Type, knownshapes.BindableEnvironmentVariablesShape)
}
resourceReference := bindableShape.Data.ServiceInstanceName
binding := wiringutil.ConsumerProducerServiceBinding(resource.Name, dep.Name, resourceReference)
smithResources = append(smithResources, binding)
bindingResult = append(bindingResult, compute.BindingResult{
ResourceName: dep.Name,
BindableEnvVarShape: *bindableShape,
CreatedBindingFromShape: binding,
})
}
var iamRoleRef *smith_v1.Reference
bindingReferences := make([]smith_v1.Reference, 0, len(bindingResult))
shouldUseSecretPlugin := true
for _, res := range bindingResult {
ref := smith_v1.Reference{
Resource: res.CreatedBindingFromShape.Name,
}
bindingReferences = append(bindingReferences, ref)
if res.BindableEnvVarShape.Data.Vars == nil {
shouldUseSecretPlugin = false
}
}
// Reference environment variables retrieved from ServiceBinding objects
if len(bindingResult) > 0 {
var secretResource smith_v1.Resource
var err error
if shouldUseSecretPlugin {
secretRefs, envVars, err := compute.GenerateEnvVars(spec.RenameEnvVar, bindingResult)
if err != nil {
return nil, false, err
}
secretResource, err = generateSecretResource(resource.Name, envVars, secretRefs)
if err != nil {
return nil, false, err
}
} else {
secretResource, err = generateSecretEnvVarsResource(resource.Name, spec.RenameEnvVar, bindingReferences)
if err != nil {
return nil, false, err
}
}
| Resource: secretResource.Name,
Path: metadataNamePath,
}
falseObj := false
envFromSource := core_v1.EnvFromSource{
SecretRef: &core_v1.SecretEnvSource{
LocalObjectReference: core_v1.LocalObjectReference{
Name: secretRef.Ref(),
},
Optional: &falseObj,
},
}
envFrom = append(envFrom, envFromSource)
references = append(references, secretRef)
smithResources = append(smithResources, secretResource)
iamPluginInstanceSmithResource, err := iam.PluginServiceInstance(iam.KubeComputeType, resource.Name,
context.StateContext.ServiceName, false, bindingReferences, context, []string{}, buildKube2iamRoles(context))
if err != nil {
return nil, false, err
}
iamPluginBindingSmithResource := iam.ServiceBinding(resource.Name, iamPluginInstanceSmithResource.Name)
iamRoleRef = &smith_v1.Reference{
Name: wiringutil.ReferenceName(iamPluginBindingSmithResource.Name, bindingOutputRoleARNKey),
Resource: iamPluginBindingSmithResource.Name,
Path: fmt.Sprintf("data.%s", bindingOutputRoleARNKey),
Example: "arn:aws:iam::123456789012:role/path/role",
Modifier: smith_v1.ReferenceModifierBindSecret,
}
references = append(references, *iamRoleRef)
smithResources = append(smithResources, iamPluginInstanceSmithResource, iamPluginBindingSmithResource)
}
serviceAccountResource := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(resource.Name, serviceAccountPostFix),
Spec: smith_v1.ResourceSpec{
Object: &core_v1.ServiceAccount{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.ServiceAccountKind,
APIVersion: core_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaNameWithPostfix(resource.Name, serviceAccountPostFix),
},
ImagePullSecrets: []core_v1.LocalObjectReference{{Name: apik8scompute.DockerImagePullName}},
},
},
}
smithResources = append(smithResources, serviceAccountResource)
serviceAccountNameRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(serviceAccountResource.Name, metadataElement, nameElement),
Resource: serviceAccountResource.Name,
Path: metadataNamePath,
}
references = append(references, serviceAccountNameRef)
// default env vars for containers
var envDefault []core_v1.EnvVar
// ASAP public key servers
// we want every container to know where to get the public keys
// regardless if they're using ASAP or not
envDefault = append(envDefault, asapkey.GetPublicKeyRepoEnvVars(context.StateContext.Location)...)
// always bind to the common secret, it's OK if it doesn't exist
trueVar := true
commonEnvFrom := core_v1.EnvFromSource{
SecretRef: &core_v1.SecretEnvSource{
LocalObjectReference: core_v1.LocalObjectReference{
Name: apik8scompute.CommonSecretName,
},
Optional: &trueVar,
},
}
envFrom = append(envFrom, commonEnvFrom)
// prepare containers
containers := buildContainers(spec, envDefault, envFrom)
labelMap := map[string]string{
stateNameLabel: context.StateMeta.Name,
resourceNameLabel: string(resource.Name),
}
podSpec := buildPodSpec(containers, serviceAccountNameRef.Ref())
// The kube deployment object spec
deploymentSpec := buildDeploymentSpec(context, spec, podSpec, labelMap, iamRoleRef)
// The final wired deployment object
deployment := smith_v1.Resource{
Name: wiringutil.ResourceName(resource.Name),
References: references,
Spec: smith_v1.ResourceSpec{
Object: &apps_v1.Deployment{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.DeploymentKind,
APIVersion: apps_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaName(resource.Name),
},
Spec: deploymentSpec,
},
},
}
smithResources = append(smithResources, deployment)
deploymentNameRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(deployment.Name, metadataElement, nameElement),
Resource: deployment.Name,
Path: metadataNamePath,
}
// 0 value for replicas indicates we don't need an HPA
if spec.Scaling.MinReplicas > 0 && spec.Scaling.MaxReplicas > 0 {
hpaSpec := buildHorizontalPodAutoscalerSpec(spec, deploymentNameRef.Ref())
// The final wired HPA object
hpa := smith_v | secretRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(secretResource.Name, metadataElement, nameElement), | random_line_split |
plugin.go | nil
}
func validateScaling(s Scaling) error {
// we only need to validate when min & max are both positive;
// if one or more are 0, it means we provision a deployment with no HPA
if s.MinReplicas > 0 &&
s.MaxReplicas > 0 &&
s.MinReplicas > s.MaxReplicas {
return errors.Errorf("MaxReplicas (%d) must be greater than MinReplicas (%d)", s.MaxReplicas, s.MinReplicas)
}
return nil
}
// WireUp is the main autowiring function for the K8SCompute resource, building a native kube deployment and HPA
func WireUp(resource *orch_v1.StateResource, context *wiringplugin.WiringContext) (*wiringplugin.WiringResult, bool /*retriable*/, error) {
if resource.Type != apik8scompute.ResourceType {
return nil, false, errors.Errorf("invalid resource type: %q", resource.Type)
}
// Validate ASAP dependencies
asapDependencyCount := 0
for _, dep := range context.Dependencies {
if dep.Type == asapkey.ResourceType {
// Only allow one asap key dependency per compute
// so we can use same micros1 env var names and facilitate migration
if asapDependencyCount++; asapDependencyCount > 1 {
return nil, false, errors.Errorf("cannot depend on more than one asap key resource")
}
}
}
// Parse spec and apply defaults
spec := &Spec{}
if err := resource.SpecIntoTyped(spec); err != nil {
return nil, false, err
}
// Apply the defaults from the resource state
// Defaults are defined in the formation layer
if err := spec.ApplyDefaults(resource.Defaults); err != nil {
return nil, false, err
}
if err := validateSpec(spec); err != nil {
return nil, false, err
}
// Prepare environment variables
var envFrom []core_v1.EnvFromSource
var smithResources []smith_v1.Resource
var bindingResult []compute.BindingResult
references := make([]smith_v1.Reference, 0, len(context.Dependencies))
for _, dep := range context.Dependencies {
bindableShape, found, err := knownshapes.FindBindableEnvironmentVariablesShape(dep.Contract.Shapes)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, errors.Errorf("cannot depend on resource %q of type %q, only dependencies providing shape %q are supported", dep.Name, dep.Type, knownshapes.BindableEnvironmentVariablesShape)
}
resourceReference := bindableShape.Data.ServiceInstanceName
binding := wiringutil.ConsumerProducerServiceBinding(resource.Name, dep.Name, resourceReference)
smithResources = append(smithResources, binding)
bindingResult = append(bindingResult, compute.BindingResult{
ResourceName: dep.Name,
BindableEnvVarShape: *bindableShape,
CreatedBindingFromShape: binding,
})
}
var iamRoleRef *smith_v1.Reference
bindingReferences := make([]smith_v1.Reference, 0, len(bindingResult))
shouldUseSecretPlugin := true
for _, res := range bindingResult {
ref := smith_v1.Reference{
Resource: res.CreatedBindingFromShape.Name,
}
bindingReferences = append(bindingReferences, ref)
if res.BindableEnvVarShape.Data.Vars == nil |
}
// Reference environment variables retrieved from ServiceBinding objects
if len(bindingResult) > 0 {
var secretResource smith_v1.Resource
var err error
if shouldUseSecretPlugin {
secretRefs, envVars, err := compute.GenerateEnvVars(spec.RenameEnvVar, bindingResult)
if err != nil {
return nil, false, err
}
secretResource, err = generateSecretResource(resource.Name, envVars, secretRefs)
if err != nil {
return nil, false, err
}
} else {
secretResource, err = generateSecretEnvVarsResource(resource.Name, spec.RenameEnvVar, bindingReferences)
if err != nil {
return nil, false, err
}
}
secretRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(secretResource.Name, metadataElement, nameElement),
Resource: secretResource.Name,
Path: metadataNamePath,
}
falseObj := false
envFromSource := core_v1.EnvFromSource{
SecretRef: &core_v1.SecretEnvSource{
LocalObjectReference: core_v1.LocalObjectReference{
Name: secretRef.Ref(),
},
Optional: &falseObj,
},
}
envFrom = append(envFrom, envFromSource)
references = append(references, secretRef)
smithResources = append(smithResources, secretResource)
iamPluginInstanceSmithResource, err := iam.PluginServiceInstance(iam.KubeComputeType, resource.Name,
context.StateContext.ServiceName, false, bindingReferences, context, []string{}, buildKube2iamRoles(context))
if err != nil {
return nil, false, err
}
iamPluginBindingSmithResource := iam.ServiceBinding(resource.Name, iamPluginInstanceSmithResource.Name)
iamRoleRef = &smith_v1.Reference{
Name: wiringutil.ReferenceName(iamPluginBindingSmithResource.Name, bindingOutputRoleARNKey),
Resource: iamPluginBindingSmithResource.Name,
Path: fmt.Sprintf("data.%s", bindingOutputRoleARNKey),
Example: "arn:aws:iam::123456789012:role/path/role",
Modifier: smith_v1.ReferenceModifierBindSecret,
}
references = append(references, *iamRoleRef)
smithResources = append(smithResources, iamPluginInstanceSmithResource, iamPluginBindingSmithResource)
}
serviceAccountResource := smith_v1.Resource{
Name: wiringutil.ResourceNameWithPostfix(resource.Name, serviceAccountPostFix),
Spec: smith_v1.ResourceSpec{
Object: &core_v1.ServiceAccount{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.ServiceAccountKind,
APIVersion: core_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaNameWithPostfix(resource.Name, serviceAccountPostFix),
},
ImagePullSecrets: []core_v1.LocalObjectReference{{Name: apik8scompute.DockerImagePullName}},
},
},
}
smithResources = append(smithResources, serviceAccountResource)
serviceAccountNameRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(serviceAccountResource.Name, metadataElement, nameElement),
Resource: serviceAccountResource.Name,
Path: metadataNamePath,
}
references = append(references, serviceAccountNameRef)
// default env vars for containers
var envDefault []core_v1.EnvVar
// ASAP public key servers
// we want every container to know where to get the public keys
// regardless if they're using ASAP or not
envDefault = append(envDefault, asapkey.GetPublicKeyRepoEnvVars(context.StateContext.Location)...)
// always bind to the common secret, it's OK if it doesn't exist
trueVar := true
commonEnvFrom := core_v1.EnvFromSource{
SecretRef: &core_v1.SecretEnvSource{
LocalObjectReference: core_v1.LocalObjectReference{
Name: apik8scompute.CommonSecretName,
},
Optional: &trueVar,
},
}
envFrom = append(envFrom, commonEnvFrom)
// prepare containers
containers := buildContainers(spec, envDefault, envFrom)
labelMap := map[string]string{
stateNameLabel: context.StateMeta.Name,
resourceNameLabel: string(resource.Name),
}
podSpec := buildPodSpec(containers, serviceAccountNameRef.Ref())
// The kube deployment object spec
deploymentSpec := buildDeploymentSpec(context, spec, podSpec, labelMap, iamRoleRef)
// The final wired deployment object
deployment := smith_v1.Resource{
Name: wiringutil.ResourceName(resource.Name),
References: references,
Spec: smith_v1.ResourceSpec{
Object: &apps_v1.Deployment{
TypeMeta: meta_v1.TypeMeta{
Kind: k8s.DeploymentKind,
APIVersion: apps_v1.SchemeGroupVersion.String(),
},
ObjectMeta: meta_v1.ObjectMeta{
Name: wiringutil.MetaName(resource.Name),
},
Spec: deploymentSpec,
},
},
}
smithResources = append(smithResources, deployment)
deploymentNameRef := smith_v1.Reference{
Name: wiringutil.ReferenceName(deployment.Name, metadataElement, nameElement),
Resource: deployment.Name,
Path: metadataNamePath,
}
// 0 value for replicas indicates we don't need an HPA
if spec.Scaling.MinReplicas > 0 && spec.Scaling.MaxReplicas > 0 {
hpaSpec := buildHorizontalPodAutoscalerSpec(spec, deploymentNameRef.Ref())
// The final wired HPA object
hpa := | {
shouldUseSecretPlugin = false
} | conditional_block |
redData_extFunction.py | 1)
print()
time.sleep(2)
for item in line5:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line6:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line7:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line8:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line9:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.3)
print()
time.sleep(2)
print("\033[1;37;41m END OF LINE.")
sys.exit(0)
def formLoad(file):
with open(file) as infile:
f2 = infile.read()
print(f2)
#Data
localfileSys = {
"root": [
"kernelComp.dat",
"(Folder) Downloads"
],
"Downloads": [
"test.txt"
],
"ToolsDB.net": [
"ToolsDB.net-connectMsg.txt",
"repoUpdate.pkg",
],
"BlackHat.org": [
"BlackHat.org-connectMsg.txt",
"forumPost.txt",
"(Folder) uID"
],
"uID": [
"(Folder) CSRin"
],
"CSRin": [
"repoUpdate_havoclist.pkg",
],
"HavocDynamics.org": [
"HavocDynamics.org-connectMsg.txt",
"(Folder) Logon",
],
"Logon": [
"(Folder) Emails",
"(Folder) Software"
],
"Emails": [
"01-15-2020.txt",
"04-27-2020.txt",
"05-8-2020.txt",
"05-12-2020.txt",
"06-13-2020.txt",
"06-16-2020.txt"
],
"Software": [
"dataStream.pkg"
]
}
accessRoutelocal = {
"root": {
"Downloads"
},
"Downloads": {
},
"BlackHat.org": {
"Tools",
"uID"
},
"uID" : {
"CSRin"
},
"CSRin" : {
},
"HavocDynamics.org": {
"Logon"
},
"Logon": {
"Emails",
"Software"
},
"Emails": {
},
"Software":{
}
}
netList = [
"ToolsDB.net"
]
#ToolsDB.net
#BlackHat.org
#HavocDynamics.org
fileContent = {
"test.txt":"This is a test text file.",
"ToolsDB.net-connectMsg.txt":"Welcome to ToolsDB.net! Feel free to download any item from this page.",
"BlackHat.org-connectMsg.txt":"Welcome to BlackHat.org! Access the forum with forumPost.txt, and download from users in the uID folder.",
"HavocDynamics.org-connectMsg.txt":"Welcome! Havoc Dynamics is currently only accessible to staff.",
"repoUpdate.pkg":repoUpdate,
"repoUpdate_havoclist.pkg":repoUpdate2,
"dataStream.pkg":dataStreamEnd
}
fileType = {
"test.txt":"textFile",
"ToolsDB.net-connectMsg.txt":"textFile",
"BlackHat.org-connectMsg.txt":"textFile",
"repoUpdate.pkg":"packageFile",
"injectionBlocker_portable.pkg":"packageFile",
"forumPost.txt":"fileTypeFormatted",
"readmeStream.txt":"fileTypeFormatted",
"01-15-2020.txt":"fileTypeFormatted",
"04-27-2020.txt":"fileTypeFormatted",
"05-8-2020.txt":"fileTypeFormatted",
"05-12-2020.txt":"fileTypeFormatted",
"06-13-2020.txt":"fileTypeFormatted",
"06-16-2020.txt":"fileTypeFormatted",
"repoUpdate_havoclist.pkg":"packageFile",
"passwordCrack_install.pkg":"packageFile",
"dataStream.pkg":"cineFile"
}
passLock = {
"Logon":1,
"01-15-2020.txt":1,
"04-27-2020.txt":1,
"05-8-2020.txt":1,
"05-12-2020.txt":1,
"06-13-2020.txt":1,
"06-16-2020.txt":1,
"Software":"hvcprivlogon1021"
}
codeList = {
1:"sudo systemctl start passCrack",
2:"sudo bruteForce --FAST",
3:"sudo apt-install crashAvoidance",
4:"T mypair<T>::getmax ()",
5:"void mysequence<T,N>::setmember",
6:"void Swap(t n1, T n2)",
7:".cfi_def_cfa_register %rbp",
8:"mov eax, offset sOutFrstByte",
9:"cmpl %cax, %ebx"
}
def crackProcess(data):
def timeout_error(*_):
raise TimeoutError
randomListint = randint(1,9)
listPick = codeList[randomListint]
inp = str()
while True:
signal.signal(signal.SIGALRM, timeout_error)
signal.alarm(10)
try:
print("Input code below:")
print(listPick)
inp = str(input())
if inp == listPick:
signal.alarm(0)
passLock[data] = 0
break
except TimeoutError:
print("Ran out of time, exiting.")
signal.alarm(0)
break
def startupLoad():
bootText = "Booting HybridOS"
loadDot = "..."
for item in bootText:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
for item in loadDot:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.5)
print()
print("Welcome to HybridOS! Type help to list commands.")
def help():
global helpDict
helpDict = {
"help":"Displays this menu.",
"ls":"Lists current accessible files.",
"netls":"Lists currently accessible networks.",
"connect":"Connects to network address.",
"disconnect":"Disconnects from current network",
"cd":"Change directory. .. to go back.",
"open":"Open a file.",
"dl":"Downloads a file.",
"rm":"Deletes a file.",
"clock":"Shows the current UTC time.",
"clear":"Clears the screen",
"crack":"Cracks a folder or file",
"First Objective?":"Try connecting to a network."
}
for key, value in helpDict.items():
print(key, ":", value)
def passCrackinstall():
global passCrack
passCrack = True
def quickCheat():
repoUpdate()
repoUpdate2()
passCrackinstall()
def ls():
print(localfileSys[currentFolder])
def cd(folder):
global currentFolder
global previousFolder
if folder in passLock and passLock[folder] == 1:
print("Locked")
return
elif folder in passLock and passLock[folder] == "hvcprivlogon1021":
inp = str(input("Password > "))
if inp == passLock[folder]:
currentFolder = folder
else:
print("Incorrect password.")
return
else:
if folder == "..":
currentFolder = previousFolder
elif folder in accessRoutelocal[currentFolder]:
previousFolder = currentFolder
currentFolder = folder
else:
print("Directory does not exist.")
def openRealOne(file):
global updatedRepo
if file in localfileSys[currentFolder]:
if file in passLock and passLock[file] == 1:
print("Locked")
return
elif fileType[file] == "textFile":
print(fileContent[file])
elif fileType[file] == "packageFile":
print("Installing", file)
fileContent[file]()
elif fileType[file] == "fileTypeFormatted":
formLoad(file)
elif fileType[file] == "cineFile":
dataStreamEnd()
else:
print("File does not exist.")
def netls():
global updatedRepo
loadDot = "..."
print("Pinging current repo", end='')
for item in loadDot:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.8)
print()
for item in netList:
print(item)
print("Current available networks:", updatedRepo)
def | connect | identifier_name |
|
redData_extFunction.py | you since you first started looking into this.. Havoc Dynamics company."
line4 = "As you've just seen, their final test went critical, and thus far, it's killed many workers."
line5 = "We can't let you walk away with you knowing what they were doing there."
line6 = "Havoc was meant to silently go under, but with you here, the information has the risk of going public."
line7 = "We cannot let that happen."
line8 = "We will arrive at your location to collect you soon."
line9 = "Have a wonderful evening."
for item in line1:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line2:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line3:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line4:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line5:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line6:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line7:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line8:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line9:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.3)
print()
time.sleep(2)
print("\033[1;37;41m END OF LINE.")
sys.exit(0)
def formLoad(file):
with open(file) as infile:
f2 = infile.read()
print(f2)
#Data
localfileSys = {
"root": [
"kernelComp.dat",
"(Folder) Downloads"
],
"Downloads": [
"test.txt"
],
"ToolsDB.net": [
"ToolsDB.net-connectMsg.txt",
"repoUpdate.pkg",
],
"BlackHat.org": [
"BlackHat.org-connectMsg.txt",
"forumPost.txt",
"(Folder) uID"
],
"uID": [
"(Folder) CSRin"
],
"CSRin": [
"repoUpdate_havoclist.pkg",
],
"HavocDynamics.org": [
"HavocDynamics.org-connectMsg.txt",
"(Folder) Logon",
],
"Logon": [
"(Folder) Emails",
"(Folder) Software"
],
"Emails": [
"01-15-2020.txt",
"04-27-2020.txt",
"05-8-2020.txt",
"05-12-2020.txt",
"06-13-2020.txt",
"06-16-2020.txt"
],
"Software": [
"dataStream.pkg"
]
}
accessRoutelocal = {
"root": {
"Downloads"
},
"Downloads": {
},
"BlackHat.org": {
"Tools",
"uID"
},
"uID" : {
"CSRin"
},
"CSRin" : {
},
"HavocDynamics.org": {
"Logon"
},
"Logon": {
"Emails",
"Software"
},
"Emails": {
},
"Software":{
}
}
netList = [
"ToolsDB.net"
]
#ToolsDB.net
#BlackHat.org
#HavocDynamics.org
fileContent = {
"test.txt":"This is a test text file.",
"ToolsDB.net-connectMsg.txt":"Welcome to ToolsDB.net! Feel free to download any item from this page.",
"BlackHat.org-connectMsg.txt":"Welcome to BlackHat.org! Access the forum with forumPost.txt, and download from users in the uID folder.",
"HavocDynamics.org-connectMsg.txt":"Welcome! Havoc Dynamics is currently only accessible to staff.",
"repoUpdate.pkg":repoUpdate,
"repoUpdate_havoclist.pkg":repoUpdate2,
"dataStream.pkg":dataStreamEnd
}
fileType = {
"test.txt":"textFile",
"ToolsDB.net-connectMsg.txt":"textFile",
"BlackHat.org-connectMsg.txt":"textFile",
"repoUpdate.pkg":"packageFile",
"injectionBlocker_portable.pkg":"packageFile",
"forumPost.txt":"fileTypeFormatted",
"readmeStream.txt":"fileTypeFormatted",
"01-15-2020.txt":"fileTypeFormatted",
"04-27-2020.txt":"fileTypeFormatted",
"05-8-2020.txt":"fileTypeFormatted",
"05-12-2020.txt":"fileTypeFormatted",
"06-13-2020.txt":"fileTypeFormatted",
"06-16-2020.txt":"fileTypeFormatted",
"repoUpdate_havoclist.pkg":"packageFile",
"passwordCrack_install.pkg":"packageFile",
"dataStream.pkg":"cineFile"
}
passLock = {
"Logon":1,
"01-15-2020.txt":1,
"04-27-2020.txt":1,
"05-8-2020.txt":1,
"05-12-2020.txt":1,
"06-13-2020.txt":1,
"06-16-2020.txt":1,
"Software":"hvcprivlogon1021"
}
codeList = {
1:"sudo systemctl start passCrack",
2:"sudo bruteForce --FAST",
3:"sudo apt-install crashAvoidance",
4:"T mypair<T>::getmax ()",
5:"void mysequence<T,N>::setmember",
6:"void Swap(t n1, T n2)",
7:".cfi_def_cfa_register %rbp",
8:"mov eax, offset sOutFrstByte",
9:"cmpl %cax, %ebx"
}
def crackProcess(data):
def timeout_error(*_):
raise TimeoutError
randomListint = randint(1,9)
listPick = codeList[randomListint]
inp = str()
while True:
signal.signal(signal.SIGALRM, timeout_error)
signal.alarm(10)
try:
print("Input code below:")
print(listPick)
inp = str(input())
if inp == listPick:
signal.alarm(0)
passLock[data] = 0
break
except TimeoutError:
print("Ran out of time, exiting.")
signal.alarm(0)
break
def startupLoad():
bootText = "Booting HybridOS"
loadDot = "..."
for item in bootText:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
for item in loadDot:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.5)
print()
print("Welcome to HybridOS! Type help to list commands.")
def help():
global helpDict
helpDict = {
"help":"Displays this menu.",
"ls":"Lists current accessible files.",
"netls":"Lists currently accessible networks.",
"connect":"Connects to network address.",
"disconnect":"Disconnects from current network",
"cd":"Change directory. .. to go back.",
"open":"Open a file.",
"dl":"Downloads a file.",
"rm":"Deletes a file.",
"clock":"Shows the current UTC time.",
"clear":"Clears the screen",
"crack":"Cracks a folder or file",
"First Objective?":"Try connecting to a network."
}
for key, value in helpDict.items():
print(key, ":", value)
def passCrackinstall():
global passCrack
passCrack = True
def quickCheat():
repoUpdate()
repoUpdate2()
passCrackinstall()
def ls():
print(localfileSys[currentFolder])
def cd(folder):
global currentFolder
global previousFolder
if folder in passLock and passLock[folder] == 1:
print("Locked")
return
elif folder in passLock and passLock[folder] == "hvcprivlogon1021":
inp = str(input("Password > "))
if inp == passLock[folder]:
currentFolder = folder
else:
| print("Incorrect password.")
return | conditional_block |
|
redData_extFunction.py | (2)
for item in line8:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line9:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.3)
print()
time.sleep(2)
print("\033[1;37;41m END OF LINE.")
sys.exit(0)
def formLoad(file):
with open(file) as infile:
f2 = infile.read()
print(f2)
#Data
localfileSys = {
"root": [
"kernelComp.dat",
"(Folder) Downloads"
],
"Downloads": [
"test.txt"
],
"ToolsDB.net": [
"ToolsDB.net-connectMsg.txt",
"repoUpdate.pkg",
],
"BlackHat.org": [
"BlackHat.org-connectMsg.txt",
"forumPost.txt",
"(Folder) uID"
],
"uID": [
"(Folder) CSRin"
],
"CSRin": [
"repoUpdate_havoclist.pkg",
],
"HavocDynamics.org": [
"HavocDynamics.org-connectMsg.txt",
"(Folder) Logon",
],
"Logon": [
"(Folder) Emails",
"(Folder) Software"
],
"Emails": [
"01-15-2020.txt",
"04-27-2020.txt",
"05-8-2020.txt",
"05-12-2020.txt",
"06-13-2020.txt",
"06-16-2020.txt"
],
"Software": [
"dataStream.pkg"
]
}
accessRoutelocal = {
"root": {
"Downloads"
},
"Downloads": {
},
"BlackHat.org": {
"Tools",
"uID"
},
"uID" : {
"CSRin"
},
"CSRin" : {
},
"HavocDynamics.org": {
"Logon"
},
"Logon": {
"Emails",
"Software"
},
"Emails": {
},
"Software":{
}
}
netList = [
"ToolsDB.net"
]
#ToolsDB.net
#BlackHat.org
#HavocDynamics.org
fileContent = {
"test.txt":"This is a test text file.",
"ToolsDB.net-connectMsg.txt":"Welcome to ToolsDB.net! Feel free to download any item from this page.",
"BlackHat.org-connectMsg.txt":"Welcome to BlackHat.org! Access the forum with forumPost.txt, and download from users in the uID folder.",
"HavocDynamics.org-connectMsg.txt":"Welcome! Havoc Dynamics is currently only accessible to staff.",
"repoUpdate.pkg":repoUpdate,
"repoUpdate_havoclist.pkg":repoUpdate2,
"dataStream.pkg":dataStreamEnd
}
fileType = {
"test.txt":"textFile",
"ToolsDB.net-connectMsg.txt":"textFile",
"BlackHat.org-connectMsg.txt":"textFile",
"repoUpdate.pkg":"packageFile",
"injectionBlocker_portable.pkg":"packageFile",
"forumPost.txt":"fileTypeFormatted",
"readmeStream.txt":"fileTypeFormatted",
"01-15-2020.txt":"fileTypeFormatted",
"04-27-2020.txt":"fileTypeFormatted",
"05-8-2020.txt":"fileTypeFormatted",
"05-12-2020.txt":"fileTypeFormatted",
"06-13-2020.txt":"fileTypeFormatted",
"06-16-2020.txt":"fileTypeFormatted",
"repoUpdate_havoclist.pkg":"packageFile",
"passwordCrack_install.pkg":"packageFile",
"dataStream.pkg":"cineFile"
}
passLock = {
"Logon":1,
"01-15-2020.txt":1,
"04-27-2020.txt":1,
"05-8-2020.txt":1,
"05-12-2020.txt":1,
"06-13-2020.txt":1,
"06-16-2020.txt":1,
"Software":"hvcprivlogon1021"
}
codeList = {
1:"sudo systemctl start passCrack",
2:"sudo bruteForce --FAST",
3:"sudo apt-install crashAvoidance",
4:"T mypair<T>::getmax ()",
5:"void mysequence<T,N>::setmember",
6:"void Swap(t n1, T n2)",
7:".cfi_def_cfa_register %rbp",
8:"mov eax, offset sOutFrstByte",
9:"cmpl %cax, %ebx"
}
def crackProcess(data):
def timeout_error(*_):
raise TimeoutError
randomListint = randint(1,9)
listPick = codeList[randomListint]
inp = str()
while True:
signal.signal(signal.SIGALRM, timeout_error)
signal.alarm(10)
try:
print("Input code below:")
print(listPick)
inp = str(input())
if inp == listPick:
signal.alarm(0)
passLock[data] = 0
break
except TimeoutError:
print("Ran out of time, exiting.")
signal.alarm(0)
break
def startupLoad():
bootText = "Booting HybridOS"
loadDot = "..."
for item in bootText:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
for item in loadDot:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.5)
print()
print("Welcome to HybridOS! Type help to list commands.")
def help():
global helpDict
helpDict = {
"help":"Displays this menu.",
"ls":"Lists current accessible files.",
"netls":"Lists currently accessible networks.",
"connect":"Connects to network address.",
"disconnect":"Disconnects from current network",
"cd":"Change directory. .. to go back.",
"open":"Open a file.",
"dl":"Downloads a file.",
"rm":"Deletes a file.",
"clock":"Shows the current UTC time.",
"clear":"Clears the screen",
"crack":"Cracks a folder or file",
"First Objective?":"Try connecting to a network."
}
for key, value in helpDict.items():
print(key, ":", value)
def passCrackinstall():
global passCrack
passCrack = True
def quickCheat():
repoUpdate()
repoUpdate2()
passCrackinstall()
def ls():
print(localfileSys[currentFolder])
def cd(folder):
global currentFolder
global previousFolder
if folder in passLock and passLock[folder] == 1:
print("Locked")
return
elif folder in passLock and passLock[folder] == "hvcprivlogon1021":
inp = str(input("Password > "))
if inp == passLock[folder]:
currentFolder = folder
else:
print("Incorrect password.")
return
else:
if folder == "..":
currentFolder = previousFolder
elif folder in accessRoutelocal[currentFolder]:
previousFolder = currentFolder
currentFolder = folder
else:
print("Directory does not exist.")
def openRealOne(file):
global updatedRepo
if file in localfileSys[currentFolder]:
if file in passLock and passLock[file] == 1:
print("Locked")
return
elif fileType[file] == "textFile":
print(fileContent[file])
elif fileType[file] == "packageFile":
print("Installing", file)
fileContent[file]()
elif fileType[file] == "fileTypeFormatted":
formLoad(file)
elif fileType[file] == "cineFile":
dataStreamEnd()
else:
print("File does not exist.")
def netls():
global updatedRepo
loadDot = "..."
print("Pinging current repo", end='')
for item in loadDot:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.8)
print()
for item in netList:
print(item)
print("Current available networks:", updatedRepo)
def connect(netLocation):
| global currentFolder
global connected
connected = True
if netLocation in netList:
loadDot = "..."
print("Connecting to address", end='')
for item in loadDot:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.8)
print()
connectMsgcheck = netLocation + "-connectMsg.txt"
print(fileContent[connectMsgcheck])
currentFolder = netLocation
connected = True
else:
print("Network does not exist.") | identifier_body |
|
redData_extFunction.py | : SHTIKH3912\nAMSPEC Temp: 79C\nSystem Status: Stable")
time.sleep(3)
os.system('clear')
print("robertcapluund.security: It's going to go critical! Everyone needs to get the hell out of here!\nCurrent Sample: SHTIKH3912\nAMSPEC Temp: 88C\nSystem Status: WARN")
time.sleep(4)
os.system('clear')
print("simonhendrikkson.labtech: My god.\nCurrent Sample: SHTIKH3912\nAMSPEC Temp: 164C\nSystem Status: MELTDOWN")
time.sleep(2)
os.system('clear')
print("CONNECTION PROBLEM...")
time.sleep(8)
line1 = "Hello."
line2 = "You don't know who we are, but you will soon."
line3 = "We've had our eye on you since you first started looking into this.. Havoc Dynamics company."
line4 = "As you've just seen, their final test went critical, and thus far, it's killed many workers."
line5 = "We can't let you walk away with you knowing what they were doing there."
line6 = "Havoc was meant to silently go under, but with you here, the information has the risk of going public."
line7 = "We cannot let that happen."
line8 = "We will arrive at your location to collect you soon."
line9 = "Have a wonderful evening."
for item in line1:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line2:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line3:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line4:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line5:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line6:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line7:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line8:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
print()
time.sleep(2)
for item in line9:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.3)
print()
time.sleep(2)
print("\033[1;37;41m END OF LINE.")
sys.exit(0)
def formLoad(file):
with open(file) as infile:
f2 = infile.read()
print(f2)
#Data
localfileSys = {
"root": [
"kernelComp.dat",
"(Folder) Downloads"
],
"Downloads": [
"test.txt"
],
"ToolsDB.net": [
"ToolsDB.net-connectMsg.txt",
"repoUpdate.pkg",
],
"BlackHat.org": [
"BlackHat.org-connectMsg.txt",
"forumPost.txt",
"(Folder) uID"
],
"uID": [
"(Folder) CSRin"
],
"CSRin": [
"repoUpdate_havoclist.pkg",
],
"HavocDynamics.org": [
"HavocDynamics.org-connectMsg.txt",
"(Folder) Logon",
],
"Logon": [
"(Folder) Emails",
"(Folder) Software"
],
"Emails": [
"01-15-2020.txt",
"04-27-2020.txt",
"05-8-2020.txt",
"05-12-2020.txt",
"06-13-2020.txt",
"06-16-2020.txt"
],
"Software": [
"dataStream.pkg"
]
}
accessRoutelocal = {
"root": {
"Downloads"
},
"Downloads": {
},
"BlackHat.org": {
"Tools",
"uID"
},
"uID" : {
"CSRin"
},
"CSRin" : {
},
"HavocDynamics.org": {
"Logon"
},
"Logon": {
"Emails",
"Software"
},
"Emails": {
},
"Software":{
}
}
netList = [
"ToolsDB.net"
]
#ToolsDB.net
#BlackHat.org
#HavocDynamics.org
fileContent = {
"test.txt":"This is a test text file.",
"ToolsDB.net-connectMsg.txt":"Welcome to ToolsDB.net! Feel free to download any item from this page.",
"BlackHat.org-connectMsg.txt":"Welcome to BlackHat.org! Access the forum with forumPost.txt, and download from users in the uID folder.",
"HavocDynamics.org-connectMsg.txt":"Welcome! Havoc Dynamics is currently only accessible to staff.",
"repoUpdate.pkg":repoUpdate,
"repoUpdate_havoclist.pkg":repoUpdate2,
"dataStream.pkg":dataStreamEnd
}
fileType = {
"test.txt":"textFile",
"ToolsDB.net-connectMsg.txt":"textFile",
"BlackHat.org-connectMsg.txt":"textFile",
"repoUpdate.pkg":"packageFile",
"injectionBlocker_portable.pkg":"packageFile",
"forumPost.txt":"fileTypeFormatted",
"readmeStream.txt":"fileTypeFormatted",
"01-15-2020.txt":"fileTypeFormatted",
"04-27-2020.txt":"fileTypeFormatted",
"05-8-2020.txt":"fileTypeFormatted",
"05-12-2020.txt":"fileTypeFormatted",
"06-13-2020.txt":"fileTypeFormatted",
"06-16-2020.txt":"fileTypeFormatted",
"repoUpdate_havoclist.pkg":"packageFile",
"passwordCrack_install.pkg":"packageFile",
"dataStream.pkg":"cineFile"
}
passLock = {
"Logon":1,
"01-15-2020.txt":1,
"04-27-2020.txt":1,
"05-8-2020.txt":1,
"05-12-2020.txt":1,
"06-13-2020.txt":1,
"06-16-2020.txt":1,
"Software":"hvcprivlogon1021"
}
codeList = {
1:"sudo systemctl start passCrack",
2:"sudo bruteForce --FAST",
3:"sudo apt-install crashAvoidance",
4:"T mypair<T>::getmax ()",
5:"void mysequence<T,N>::setmember",
6:"void Swap(t n1, T n2)",
7:".cfi_def_cfa_register %rbp",
8:"mov eax, offset sOutFrstByte",
9:"cmpl %cax, %ebx"
}
def crackProcess(data):
def timeout_error(*_):
raise TimeoutError
randomListint = randint(1,9)
listPick = codeList[randomListint]
inp = str()
while True:
signal.signal(signal.SIGALRM, timeout_error)
signal.alarm(10)
try:
print("Input code below:")
print(listPick)
inp = str(input())
if inp == listPick:
signal.alarm(0)
passLock[data] = 0
break
except TimeoutError:
print("Ran out of time, exiting.")
signal.alarm(0)
break
def startupLoad():
bootText = "Booting HybridOS"
loadDot = "..."
for item in bootText:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.1)
for item in loadDot:
sys.stdout.write(item)
sys.stdout.flush()
time.sleep(0.5)
print()
print("Welcome to HybridOS! Type help to list commands.")
def help():
global helpDict
helpDict = {
"help":"Displays this menu.",
"ls":"Lists current accessible files.",
"netls":"Lists currently accessible networks.",
"connect":"Connects to network address.",
"disconnect":"Disconnects from current network",
"cd":"Change directory. .. to go back.",
"open":"Open a file.",
"dl":"Downloads a file.", | "rm":"Deletes a file.", | random_line_split |
|
lib.rs | , Cursor, PTraceState, PTraceStateRef, RegNum};
/// The result type returned by methods in this crate.
pub type Result<T> = result::Result<T, Error>;
| }
/// The error type returned by methods in this crate.
#[derive(Debug)]
pub struct Error(ErrorInner);
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ErrorInner::Io(ref e) => fmt::Display::fmt(e, fmt),
ErrorInner::Unwind(ref e) => fmt::Display::fmt(e, fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"rstack error"
}
fn cause(&self) -> Option<&error::Error> {
match self.0 {
ErrorInner::Io(ref e) => Some(e),
ErrorInner::Unwind(ref e) => Some(e),
}
}
}
/// Information about a remote process.
#[derive(Debug, Clone)]
pub struct Process {
id: u32,
threads: Vec<Thread>,
}
impl Process {
/// Returns the process's ID.
pub fn id(&self) -> u32 {
self.id
}
/// Returns information about the threads of the process.
pub fn threads(&self) -> &[Thread] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue;
} else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
| #[derive(Debug)]
enum ErrorInner {
Io(io::Error),
Unwind(unwind::Error), | random_line_split |
lib.rs | , Cursor, PTraceState, PTraceStateRef, RegNum};
/// The result type returned by methods in this crate.
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
enum | {
Io(io::Error),
Unwind(unwind::Error),
}
/// The error type returned by methods in this crate.
#[derive(Debug)]
pub struct Error(ErrorInner);
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ErrorInner::Io(ref e) => fmt::Display::fmt(e, fmt),
ErrorInner::Unwind(ref e) => fmt::Display::fmt(e, fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"rstack error"
}
fn cause(&self) -> Option<&error::Error> {
match self.0 {
ErrorInner::Io(ref e) => Some(e),
ErrorInner::Unwind(ref e) => Some(e),
}
}
}
/// Information about a remote process.
#[derive(Debug, Clone)]
pub struct Process {
id: u32,
threads: Vec<Thread>,
}
impl Process {
/// Returns the process's ID.
pub fn id(&self) -> u32 {
self.id
}
/// Returns information about the threads of the process.
pub fn threads(&self) -> &[Thread] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue;
} else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
| ErrorInner | identifier_name |
lib.rs | ] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue;
} else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
}
}
}
impl Borrow<u32> for TracedThread {
fn borrow(&self) -> &u32 {
&self.0
}
}
impl TracedThread {
fn new(pid: u32) -> io::Result<TracedThread> {
unsafe {
let ret = ptrace(
PTRACE_SEIZE,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let thread = TracedThread(pid);
let ret = ptrace(
PTRACE_INTERRUPT,
pid as pid_t,
ptr::null_mut::<c_void>(),
ptr::null_mut::<c_void>(),
);
if ret != 0 {
return Err(io::Error::last_os_error());
}
let mut status = 0;
while waitpid(pid as pid_t, &mut status, __WALL) < 0 {
let e = io::Error::last_os_error();
if e.kind() != io::ErrorKind::Interrupted {
return Err(e);
}
}
if !WIFSTOPPED(status) | {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("unexpected wait status {}", status),
));
} | conditional_block |
|
lib.rs | , Cursor, PTraceState, PTraceStateRef, RegNum};
/// The result type returned by methods in this crate.
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
enum ErrorInner {
Io(io::Error),
Unwind(unwind::Error),
}
/// The error type returned by methods in this crate.
#[derive(Debug)]
pub struct Error(ErrorInner);
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ErrorInner::Io(ref e) => fmt::Display::fmt(e, fmt),
ErrorInner::Unwind(ref e) => fmt::Display::fmt(e, fmt),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"rstack error"
}
fn cause(&self) -> Option<&error::Error> {
match self.0 {
ErrorInner::Io(ref e) => Some(e),
ErrorInner::Unwind(ref e) => Some(e),
}
}
}
/// Information about a remote process.
#[derive(Debug, Clone)]
pub struct Process {
id: u32,
threads: Vec<Thread>,
}
impl Process {
/// Returns the process's ID.
pub fn id(&self) -> u32 {
self.id
}
/// Returns information about the threads of the process.
pub fn threads(&self) -> &[Thread] {
&self.threads
}
}
/// Information about a thread of a remote process.
#[derive(Debug, Clone)]
pub struct Thread {
id: u32,
name: Option<String>,
frames: Vec<Frame>,
}
impl Thread {
/// Returns the thread's ID.
#[inline]
pub fn id(&self) -> u32 {
self.id
}
/// Returns the thread's name, if known.
#[inline]
pub fn name(&self) -> Option<&str> {
self.name.as_ref().map(|s| &**s)
}
/// Returns the frames of the stack trace representing the state of the thread.
#[inline]
pub fn frames(&self) -> &[Frame] {
&self.frames
}
}
/// Information about a stack frame of a remote process.
#[derive(Debug, Clone)]
pub struct Frame {
ip: usize,
is_signal: Option<bool>,
name: Option<ProcedureName>,
info: Option<ProcedureInfo>,
}
impl Frame {
/// Returns the instruction pointer of the frame.
#[inline]
pub fn ip(&self) -> usize {
self.ip
}
/// Determines if the frame is from a signal handler, if known.
#[inline]
pub fn is_signal(&self) -> Option<bool> {
self.is_signal
}
/// Returns the name of the procedure that this frame is running, if known.
///
/// In certain contexts, particularly when the binary being traced or its dynamic libraries have
/// been stripped, the unwinder may not have enough information to properly identify the
/// procedure and will simply return the first label before the frame's instruction pointer. The
/// offset will always be relative to this label.
#[inline]
pub fn name(&self) -> Option<&ProcedureName> {
self.name.as_ref()
}
/// Returns information about the procedure that this frame is running, if known.
#[inline]
pub fn info(&self) -> Option<&ProcedureInfo> {
self.info.as_ref()
}
}
/// Information about a name of a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureName {
name: String,
offset: usize,
}
impl ProcedureName {
/// Returns the name of the procedure.
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns the offset of the instruction pointer from this procedure's starting address.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
}
/// Information about a procedure.
#[derive(Debug, Clone)]
pub struct ProcedureInfo {
start_ip: usize,
end_ip: usize,
}
impl ProcedureInfo {
/// Returns the starting address of this procedure.
#[inline]
pub fn start_ip(&self) -> usize {
self.start_ip
}
/// Returns the ending address of this procedure.
#[inline]
pub fn end_ip(&self) -> usize {
self.end_ip
}
}
/// A struct controlling the behavior of tracing.
#[derive(Debug, Clone)]
pub struct TraceOptions {
thread_names: bool,
procedure_names: bool,
procedure_info: bool,
}
impl Default for TraceOptions {
fn default() -> TraceOptions {
TraceOptions {
thread_names: false,
procedure_names: false,
procedure_info: false,
}
}
}
impl TraceOptions {
/// Returns a new `TraceOptions` with default settings.
pub fn new() -> TraceOptions {
TraceOptions::default()
}
/// If set, the names of the process's threads will be recorded.
///
/// Defaults to `false`.
pub fn thread_names(&mut self, thread_names: bool) -> &mut TraceOptions {
self.thread_names = thread_names;
self
}
/// If set, the names of the procedures running in the frames of the process's threads will be
/// recorded.
///
/// Defaults to `false`.
pub fn procedure_names(&mut self, procedure_names: bool) -> &mut TraceOptions {
self.procedure_names = procedure_names;
self
}
/// If set, information about the procedures running in the frames of the process's threads will
/// be recorded.
///
/// Defaults to `false`.
pub fn procedure_info(&mut self, procedure_info: bool) -> &mut TraceOptions {
self.procedure_info = procedure_info;
self
}
/// Traces the threads of the specified process.
pub fn trace(&self, pid: u32) -> Result<Process> {
let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT)
.map_err(|e| Error(ErrorInner::Unwind(e)))?;
let threads = get_threads(pid)?;
let mut traces = vec![];
for thread in &threads {
let name = if self.thread_names {
get_name(pid, thread.0)
} else {
None
};
match thread.dump(&space, self) {
Ok(frames) => traces.push(Thread {
id: thread.0,
name,
frames,
}),
Err(e) => debug!("error tracing thread {}: {}", thread.0, e),
}
}
Ok(Process {
id: pid,
threads: traces,
})
}
}
/// A convenience wrapper over `TraceOptions` which returns a maximally verbose trace.
pub fn trace(pid: u32) -> Result<Process> {
TraceOptions::new()
.thread_names(true)
.procedure_names(true)
.procedure_info(true)
.trace(pid)
}
fn get_threads(pid: u32) -> Result<BTreeSet<TracedThread>> {
let mut threads = BTreeSet::new();
let path = format!("/proc/{}/task", pid);
// new threads may be created while we're in the process of stopping them all, so loop a couple
// of times to hopefully converge
for _ in 0..5 {
let prev = threads.len();
add_threads(&mut threads, &path)?;
if prev == threads.len() {
break;
}
}
Ok(threads)
}
fn add_threads(threads: &mut BTreeSet<TracedThread>, dir: &str) -> Result<()> | } else {
return Err(Error(ErrorInner::Io(e)));
},
};
threads.insert(thread);
}
}
Ok(())
}
fn get_name(pid: u32, tid: u32) -> Option<String> {
let path = format!("/proc/{}/task/{}/comm", pid, tid);
let mut name = vec![];
match File::open(path).and_then(|mut f| f.read_to_end(&mut name)) {
Ok(_) => Some(String::from_utf8_lossy(&name).trim().to_string()),
Err(e) => {
debug!("error getting name for thread {}: {}", tid, e);
None
}
}
}
#[derive(PartialOrd, Ord, PartialEq, Eq)]
struct TracedThread(u32);
impl Drop for TracedThread {
fn drop(&mut self) {
unsafe {
ptrace(
PTRACE_DETACH,
self.0 as pid_t,
| {
for entry in fs::read_dir(dir).map_err(|e| Error(ErrorInner::Io(e)))? {
let entry = entry.map_err(|e| Error(ErrorInner::Io(e)))?;
let pid = match entry
.file_name()
.to_str()
.and_then(|s| s.parse::<u32>().ok())
{
Some(pid) => pid,
None => continue,
};
if !threads.contains(&pid) {
let thread = match TracedThread::new(pid) {
Ok(thread) => thread,
// ESRCH just means the thread died in the middle of things, which is fine
Err(e) => if e.raw_os_error() == Some(ESRCH) {
debug!("error attaching to thread {}: {}", pid, e);
continue; | identifier_body |
pyBasic.py | you to catch certain exceptions and also execute certain code depending on the exception
print("The number you provided can't divide 1 because it is 0")
except ValueError:
print("You did not provide a number")
except: # You can also have an empty except at the end to catch an unexpected exception:
print("Something went wrong")
else: # Else allows one to check if there was no exception when executing the try block. This is useful when we want to execute something only if there were no errors.
print("success a=",a)
finally: # Finally allows us to always execute something even if there is an exception or not. This is usually used to signify the end of the try except
print("Processing Complete")
#THE USE OF CONTINUE IN A LOOP
count = 0
sum = 0
while True:
inp = input("Enter a number ('quit' to interrupt): ")
try:
x = int(inp)
sum = sum + x
count = count + 1
print("Step: ",count,"Sum: ",sum)
except:
if inp == "quit":
break
else:
print("Not a number!")
continue
print("Have a good day !")
#LOOP IDIOMS
#FINDING THE LARGEST VALUE
largest_so_far = None
print("Before",largest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if largest_so_far is None or largest_so_far < num:
largest_so_far = num
print(largest_so_far,num)
print("After",largest_so_far)
#FINDING THE SMALLEST VALUE
smallest_so_far = None
print("Before",smallest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if smallest_so_far is None:
smallest_so_far = num
elif smallest_so_far > num:
smallest_so_far = num
print(smallest_so_far,num)
print("After",smallest_so_far)
#COUTING IN A LOOP
friends = ["Robert","Fred","John","Paul","Jean","Marie","Anna"]
count = 0
for friend in friends:
count = count + 1
print(count,friend)
#SUMMING IN A LOOP
scores = [4,5,67,54,56,78,7]
total = 0
count = 0
for score in scores:
total = total + score
count = count + 1
print("After",count,"Total is",total)
print("Average is",total/count)
#SEARCH USING A BOOLEAN VARIABLE
found = False
print("Before",found)
for value in[56,4,67,34,2,34,54,66,77]:
if value == 34:
found = True
print(found,value)
print("After",found)
#LISTS are mutable
Lists = [1,5.556,"Restaurant",3.20]
Lists = [1,5.556,[23,43,"%"],3.20] # Nested list in a list
Lists = [1,5.556,("name","ddb",176),3.20] # Nested tuple in a list
Lists2 = Lists + (8,9.9,"Mean",4,5.77) # Concatenate two lists
Lists.extend(["dessert","café"]) # Use extend to add elements to list
Lists.append(["Pop",10]) # Use append to add elements to list
Lists[1] = "Lionel" # Change the element based on the index
del(Lists[1]) # Delete the element based on the index
"Lionel Meylan".split() # Split the string, default is by space
"Today, now, here, it's warm.".split(",") # Split the string by comma (specified delimiter)
ratings = [4,56,76,4,89,34,99] # Sort a list in a new list
sorted_ratings = sorted(ratings)
print(sorted_ratings)
ratings.sort() # Sort a list ascending
print(ratings)
ratings.reverse() # Sort a list descending
print(ratings)
A = ["hard rock", 10, 1.2] ; B = A # COPY (copy by reference) the list A : When B equal to A; both A and B are referencing the same list in memory
print('A:', A)
print('B:', B)
print('B[0]:', B[0])
A[0] = "banana"
print('B[0]:', B[0]) # If we change the first element in A to banana, then list B also changes
B = A[:] # CLONE (clone by value) the list A : When B references a new copy or clone of the original list
print('B[0]:', B[0])
A[0] = "hard rock" # If you change A, B will not change
print('B[0]:', B[0])
#TUPLES are immutable which means you cannot update or change the values of tuple elements
Ratings = (10,9.5,"Good",4,"A",6.66,"C","Bad")
Ratings[5]
Ratings[2:3]
len(Ratings)
print(type(Ratings[0]))
print(type(Ratings[1]))
print(type(Ratings[2]))
Ratings2 = Ratings + (8,9.9,"Mean",4,5.77) # Concatenate two tuples
Ratings2
Ratings3 = (0, 9, 6, 5, 10, 8, 9, 6, 2)
Ratings3Sorted = sorted(Ratings3) # Sort the tuple
Ratings3Sorted
Ratings = (2,3,(33,44,"Perfect"),"Bad") # Nested tuples
Ratings[2]
Ratings[2][2]
Ratings[2][2][3] # We can access strings in the second nested tuples using a third index
Ratings.index("Perfect") # Find the first index of "Perfect"
#SETS are unordered (unlike lists and tuples)
album_list = ["Michael Jackson","Thriller","Thriller",1982]
album_set = set(album_list) # Transform a list into a set
album_set
album_set.add("Ghost of Ushiiro") # Add element to set
album_set.add("Michael Jackson") # Try to add duplicate element to the set
album_set.remove("Michael Jackson") # Remove an element from a set
check = "Thriller" in album_set
print(check)
album_set2 = {"Ghost of Ushiiro","PES4"}
album_set3 = album_set & album_set2 # Intersection of 2 sets
album_set1.intersection(album_set2) # Use intersection method to find the intersection of album_list1 and album_list2
album_set4 = album_set.union(album_set2) # Union of 2 sets
album_set5 = album_set1.difference(album_set2) # Find the difference in set1 but not set2
album_set3.issubset(album_set4) # Check if a set is in a subset
album_set4.issubset(album_set3) # Check if a set is in a superset
#DICTIONARIES
dict = {"key1":1,"key2":"2","key3":[3,4,5],"key4":(4,5,6),('key5'):5}
dict["key3"] # Returns the value
dict["key6"] = "2007" # Append value with key into dictionary
del(dict["key1"]) # Delete entries by key
"key6" in dict # Check if a key is in a dictionary
dict.keys() # All the keys in a dictionary
dict.values() # All the values in a dictionary
#FUNCTIONS
def hello_world():
print("Hello world")
hello_world()
def hello(): # Defines the function
x = input("Enter your name: ")
print("Hello ",x)
print("Max",max(x))
print("Min",min(x))
if x == "Lionel":
print("I know you !")
else:
print("Nice to meet you !")
hello() # Calls the function (never executed before)
def greet(lang):
"""
The documentation appears in triple quotes
"""
if lang =="french":
print("Bonjour")
elif lang =="german":
print("Guten Morgen")
else:
print("Hello")
help(greet) # Print the documentation stored within """
greet("french")
greet("german")
greet("wathEver")
def addtwo():
inpA | = input("Enter a first number: ")
intA = int(inpA)
inpB = input("Enter a second number: ")
intB = int(inpB)
added = intA + intB
return added # Returns the result of the function
pr | identifier_body |
|
pyBasic.py | become False)
while True:
inp = input("Enter 'yes', 'no' or 'quit': ")
if inp == "quit":
break
print(inp)
print("Au revoir")
#TRY & EXCEPT
x = input("Enter your age: ")
try:
age=int(x)
except:
print("Not a number")
print("Voilà")
a = 1
try: # Python tries to execute the code in the try block. In this case if there is any exception raised by the code in the try block it will be caught and the code block in the except block will be executed.
b = int(input("Please enter a number to divide a"))
a = a/b
except ZeroDivisionError: # A specific try except allows you to catch certain exceptions and also execute certain code depending on the exception
print("The number you provided can't divide 1 because it is 0")
except ValueError:
print("You did not provide a number")
except: # You can also have an empty except at the end to catch an unexpected exception:
print("Something went wrong")
else: # Else allows one to check if there was no exception when executing the try block. This is useful when we want to execute something only if there were no errors.
print("success a=",a)
finally: # Finally allows us to always execute something even if there is an exception or not. This is usually used to signify the end of the try except
print("Processing Complete")
#THE USE OF CONTINUE IN A LOOP
count = 0
sum = 0
while True:
inp = input("Enter a number ('quit' to interrupt): ")
try:
x = int(inp)
sum = sum + x
count = count + 1
print("Step: ",count,"Sum: ",sum)
except:
if inp == "quit":
break
else:
print("Not a number!")
continue
print("Have a good day !")
#LOOP IDIOMS
#FINDING THE LARGEST VALUE
largest_so_far = None
print("Before",largest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if largest_so_far is None or largest_so_far < num:
largest_so_far = num
print(largest_so_far,num)
print("After",largest_so_far)
#FINDING THE SMALLEST VALUE
smallest_so_far = None
print("Before",smallest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if smallest_so_far is None:
smallest_so_far = num
elif smallest_so_far > num:
smallest_so_far = num
print(smallest_so_far,num)
print("After",smallest_so_far)
#COUTING IN A LOOP
friends = ["Robert","Fred","John","Paul","Jean","Marie","Anna"]
count = 0
for friend in friends:
count = count + 1
print(count,friend)
#SUMMING IN A LOOP
scores = [4,5,67,54,56,78,7]
total = 0
count = 0
for score in scores:
total = total + score
count = count + 1
print("After",count,"Total is",total)
print("Average is",total/count)
#SEARCH USING A BOOLEAN VARIABLE
found = False
print("Before",found)
for value in[56,4,67,34,2,34,54,66,77]:
if value == 34:
found = True
print(found,value)
print("After",found)
#LISTS are mutable
Lists = [1,5.556,"Restaurant",3.20]
Lists = [1,5.556,[23,43,"%"],3.20] # Nested list in a list
Lists = [1,5.556,("name","ddb",176),3.20] # Nested tuple in a list
Lists2 = Lists + (8,9.9,"Mean",4,5.77) # Concatenate two lists
Lists.extend(["dessert","café"]) # Use extend to add elements to list
Lists.append(["Pop",10]) # Use append to add elements to list
Lists[1] = "Lionel" # Change the element based on the index
del(Lists[1]) # Delete the element based on the index
"Lionel Meylan".split() # Split the string, default is by space
"Today, now, here, it's warm.".split(",") # Split the string by comma (specified delimiter)
ratings = [4,56,76,4,89,34,99] # Sort a list in a new list
sorted_ratings = sorted(ratings)
print(sorted_ratings)
ratings.sort() # Sort a list ascending
print(ratings)
ratings.reverse() # Sort a list descending
print(ratings)
A = ["hard rock", 10, 1.2] ; B = A # COPY (copy by reference) the list A : When B equal to A; both A and B are referencing the same list in memory
print('A:', A)
print('B:', B)
print('B[0]:', B[0])
A[0] = "banana"
print('B[0]:', B[0]) # If we change the first element in A to banana, then list B also changes
B = A[:] # CLONE (clone by value) the list A : When B references a new copy or clone of the original list
print('B[0]:', B[0])
A[0] = "hard rock" # If you change A, B will not change
print('B[0]:', B[0])
#TUPLES are immutable which means you cannot update or change the values of tuple elements
Ratings = (10,9.5,"Good",4,"A",6.66,"C","Bad")
Ratings[5]
Ratings[2:3]
len(Ratings)
print(type(Ratings[0]))
print(type(Ratings[1]))
print(type(Ratings[2]))
Ratings2 = Ratings + (8,9.9,"Mean",4,5.77) # Concatenate two tuples
Ratings2
Ratings3 = (0, 9, 6, 5, 10, 8, 9, 6, 2)
Ratings3Sorted = sorted(Ratings3) # Sort the tuple
Ratings3Sorted
Ratings = (2,3,(33,44,"Perfect"),"Bad") # Nested tuples
Ratings[2]
Ratings[2][2]
Ratings[2][2][3] # We can access strings in the second nested tuples using a third index
Ratings.index("Perfect") # Find the first index of "Perfect"
#SETS are unordered (unlike lists and tuples)
album_list = ["Michael Jackson","Thriller","Thriller",1982]
album_set = set(album_list) # Transform a list into a set
album_set
album_set.add("Ghost of Ushiiro") # Add element to set
album_set.add("Michael Jackson") # Try to add duplicate element to the set
album_set.remove("Michael Jackson") # Remove an element from a set
check = "Thriller" in album_set
print(check)
album_set2 = {"Ghost of Ushiiro","PES4"}
album_set3 = album_set & album_set2 # Intersection of 2 sets
album_set1.intersection(album_set2) # Use intersection method to find the intersection of album_list1 and album_list2
album_set4 = album_set.union(album_set2) # Union of 2 sets
album_set5 = album_set1.difference(album_set2) # Find the difference in set1 but not set2
album_set3.issubset(album_set4) # Check if a set is in a subset
album_set4.issubset(album_set3) # Check if a set is in a superset
#DICTIONARIES
dict = {"key1":1,"key2":"2","key3":[3,4,5],"key4":(4,5,6),('key5'):5}
dict["key3"] # Returns the value
dict["key6"] = "2007" # Append value with key into dictionary
del(dict["key1"]) # Delete entries by key
"key6" in dict # Check if a key is in a dictionary
dict.keys() # All the keys in a dictionary
dict.values() # All the values in a dictionary
#FUNCTIONS
def hello_world():
print("Hello world")
hello_world()
def hello(): # Defines the function
x = input("Enter your name: ")
print("Hello ",x)
print("Max",max(x))
print("Min",min(x))
if x == "Lionel":
print | ("I know you !")
els | conditional_block |
|
pyBasic.py | a number!")
continue
print("Have a good day !")
#LOOP IDIOMS
#FINDING THE LARGEST VALUE
largest_so_far = None
print("Before",largest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if largest_so_far is None or largest_so_far < num:
largest_so_far = num
print(largest_so_far,num)
print("After",largest_so_far)
#FINDING THE SMALLEST VALUE
smallest_so_far = None
print("Before",smallest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if smallest_so_far is None:
smallest_so_far = num
elif smallest_so_far > num:
smallest_so_far = num
print(smallest_so_far,num)
print("After",smallest_so_far)
#COUTING IN A LOOP
friends = ["Robert","Fred","John","Paul","Jean","Marie","Anna"]
count = 0
for friend in friends:
count = count + 1
print(count,friend)
#SUMMING IN A LOOP
scores = [4,5,67,54,56,78,7]
total = 0
count = 0
for score in scores:
total = total + score
count = count + 1
print("After",count,"Total is",total)
print("Average is",total/count)
#SEARCH USING A BOOLEAN VARIABLE
found = False
print("Before",found)
for value in[56,4,67,34,2,34,54,66,77]:
if value == 34:
found = True
print(found,value)
print("After",found)
#LISTS are mutable
Lists = [1,5.556,"Restaurant",3.20]
Lists = [1,5.556,[23,43,"%"],3.20] # Nested list in a list
Lists = [1,5.556,("name","ddb",176),3.20] # Nested tuple in a list
Lists2 = Lists + (8,9.9,"Mean",4,5.77) # Concatenate two lists
Lists.extend(["dessert","café"]) # Use extend to add elements to list
Lists.append(["Pop",10]) # Use append to add elements to list
Lists[1] = "Lionel" # Change the element based on the index
del(Lists[1]) # Delete the element based on the index
"Lionel Meylan".split() # Split the string, default is by space
"Today, now, here, it's warm.".split(",") # Split the string by comma (specified delimiter)
ratings = [4,56,76,4,89,34,99] # Sort a list in a new list
sorted_ratings = sorted(ratings)
print(sorted_ratings)
ratings.sort() # Sort a list ascending
print(ratings)
ratings.reverse() # Sort a list descending
print(ratings)
A = ["hard rock", 10, 1.2] ; B = A # COPY (copy by reference) the list A : When B equal to A; both A and B are referencing the same list in memory
print('A:', A)
print('B:', B)
print('B[0]:', B[0])
A[0] = "banana"
print('B[0]:', B[0]) # If we change the first element in A to banana, then list B also changes
B = A[:] # CLONE (clone by value) the list A : When B references a new copy or clone of the original list
print('B[0]:', B[0])
A[0] = "hard rock" # If you change A, B will not change
print('B[0]:', B[0])
#TUPLES are immutable which means you cannot update or change the values of tuple elements
Ratings = (10,9.5,"Good",4,"A",6.66,"C","Bad")
Ratings[5]
Ratings[2:3]
len(Ratings)
print(type(Ratings[0]))
print(type(Ratings[1]))
print(type(Ratings[2]))
Ratings2 = Ratings + (8,9.9,"Mean",4,5.77) # Concatenate two tuples
Ratings2
Ratings3 = (0, 9, 6, 5, 10, 8, 9, 6, 2)
Ratings3Sorted = sorted(Ratings3) # Sort the tuple
Ratings3Sorted
Ratings = (2,3,(33,44,"Perfect"),"Bad") # Nested tuples
Ratings[2]
Ratings[2][2]
Ratings[2][2][3] # We can access strings in the second nested tuples using a third index
Ratings.index("Perfect") # Find the first index of "Perfect"
#SETS are unordered (unlike lists and tuples)
album_list = ["Michael Jackson","Thriller","Thriller",1982]
album_set = set(album_list) # Transform a list into a set
album_set
album_set.add("Ghost of Ushiiro") # Add element to set
album_set.add("Michael Jackson") # Try to add duplicate element to the set
album_set.remove("Michael Jackson") # Remove an element from a set
check = "Thriller" in album_set
print(check)
album_set2 = {"Ghost of Ushiiro","PES4"}
album_set3 = album_set & album_set2 # Intersection of 2 sets
album_set1.intersection(album_set2) # Use intersection method to find the intersection of album_list1 and album_list2
album_set4 = album_set.union(album_set2) # Union of 2 sets
album_set5 = album_set1.difference(album_set2) # Find the difference in set1 but not set2
album_set3.issubset(album_set4) # Check if a set is in a subset
album_set4.issubset(album_set3) # Check if a set is in a superset
#DICTIONARIES
dict = {"key1":1,"key2":"2","key3":[3,4,5],"key4":(4,5,6),('key5'):5}
dict["key3"] # Returns the value
dict["key6"] = "2007" # Append value with key into dictionary
del(dict["key1"]) # Delete entries by key
"key6" in dict # Check if a key is in a dictionary
dict.keys() # All the keys in a dictionary
dict.values() # All the values in a dictionary
#FUNCTIONS
def hello_world():
print("Hello world")
hello_world()
def hello(): # Defines the function
x = input("Enter your name: ")
print("Hello ",x)
print("Max",max(x))
print("Min",min(x))
if x == "Lionel":
print("I know you !")
else:
print("Nice to meet you !")
hello() # Calls the function (never executed before)
def greet(lang):
"""
The documentation appears in triple quotes
"""
if lang =="french":
print("Bonjour")
elif lang =="german":
print("Guten Morgen")
else:
print("Hello")
help(greet) # Print the documentation stored within """
greet("french")
greet("german")
greet("wathEver")
def addtwo():
inpA = input("Enter a first number: ")
intA = int(inpA)
inpB = input("Enter a second number: ")
intB = int(inpB)
added = intA + intB
return added # Returns the result of the function
print("Sum is",addtwo())
print("Sum is",addtwo())
def square(a):
return print(a*a)
x = 3 # Initialize a Global variable
square(x)
square(3) # Directly enter a parameter
def standard(unit=0): # Set the default value
if unit == 0:
print("not started")
else:
print("started")
standard() # With default value
standart(9) # With a value of 9
def PrintList(the_list): # Print a list using for loop
for element in the_list:
print(element)
PrintList(['1', 1, 'the man', "abc"])
def addItems(list): # Use function to append in a list
list.append("Three")
list.append("Four")
myList = []
addItems(myList)
myList
def printAll(*args): # All the arguments are into args like a tuple
print("No of arguments:", len(args))
for argument in args:
print(argument)
printAll('Horsefeather','Adonis','Bone')
def print | Dictionary(**ar | identifier_name |
|
pyBasic.py |
cc = str(bb) # to string
print(cc)
print(type(cc))
dd = int(aa) # to integer
print(dd)
print(type(dd))
bool("1") # cast to a boolean value
bool(1)
bool(0)
int(True)
int(False)
my_age = 39
print(my_age)
my_age += 1 #shortcut to update a value
print(my_age)
#NUMERIC EXPRESSIONS
a = 3
aa = a**2 # puissance
print(aa)
ii = 23
ff = ii // 5 # résultat de la division sans le reste
print(ff)
jj = ii / 5 # résultat de la division
print(jj)
kk = ii % 5 # reste de la division
print(kk)
#USER INPUT
Name = input("Enter your name: ")
print("Nice to meet you",Name)
#STRING OPERATIONS
Name[3] # Print the element on index 3 in the string
Name[-1] # Print the last element on index in the string
Name[3:8] # Slicing a string
Name[::2] # Retreive one out of two
Name[0:6:2] # Retreive one out of two (first 6 only)
Good="GsoAo+d"
Good[::2] # Retreive one out of two
print("Lionel \ Meylan") # Backslash
print("Lionel \\ Meylan") # Include back slash in string
print(r"Lionel \ Meylan") # r will tell python that string will be display as raw string
len(Name) # Length of a string
Name + " is the best" # Concatenate two strings
3 * Name # Print the string for 3 times
"\n" # New line escape sequence
"\t" # Tabulate
#STRING METHODS
up_Name = Name.upper() # Convert all the characters in string to upper case
"uppercase".upper()
low_Name = Name.lower() # Convert all the characters in string to lower case
"lowercase".lower()
Place = Name + "live in Switzerland"
D = Place.replace("Switzerland","CH") # Replace the old substring with the new target substring is the segment has been found in the string
D.find("live") # Find the substring in the string. Only the index of the first elment of substring in string will be the output
D.find("China") # If cannot find the substring in the string, answer is -1
#CONDITIONAL OPERATORS
# < Less than
# <= Less than or Equal
# == Equal to
# >= Greater than or Equal
# > Greater than
# != Not equal
#IF STATEMENT
inp = input("Enter your age:")
age = int(x)
if age > 65:
print("congratulations")
elif age >= 18:
print("enjoy your work")
else:
print("keep dreaming")
if not (album_year == '1984'): # The not statement checks if the statement is false
print ("Album year is not 1984")
#LOOPS AND ITERATIONS
#DEFINITE LOOP
#REPEATED STEPS WITH WHILE
n = 5
while n > 0:
print(n)
n = n - 1
print("Game over",n)
PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]
i = 1
Rating = PlayListRatings[0] # While loop example with length of a list and score less than 6
while(i < len(PlayListRatings) and Rating >= 6):
print(Rating)
Rating = PlayListRatings[i]
i = i + 1
squares = ['orange', 'orange', 'purple', 'blue ', 'orange']
new_squares = [] # While loop to copy the strings of the list squares to the list new_squares. Stop and exit the loop if the value on the list is not 'orange'
i = 0
while(i< len(squares) and squares[i] == "orange"):
print(square)
new_squares.append(squares[i])
i = i + 1
print(new_squares)
#IN RANGE
for i in range(5):
print(i)
print("Voilà!")
#FOR ... IN
for i in("\n",12,"cats","\n",34,"dogs"):
print(i)
friends = ["S ophie","O livier","R obert","R ichard","Y van"]
for friend in friends:
print(friend)
dates = [1982,1980,1973] # For loop example with length of a list
N = len(dates)
for i in range(N):
print(dates[i])
squares=['red','blue','green','purple']
for i, square in enumerate(squares): # Loop through the list and iterate on both index and element value
print(i, square)
#INDEFINITE LOOP
#BREAKING OUT OF AN INDEFINITE LOOP (keeps going until a logical condition become False)
while True:
inp = input("Enter 'yes', 'no' or 'quit': ")
if inp == "quit":
break
print(inp)
print("Au revoir")
#TRY & EXCEPT
x = input("Enter your age: ")
try:
age=int(x)
except:
print("Not a number")
print("Voilà")
a = 1
try: # Python tries to execute the code in the try block. In this case if there is any exception raised by the code in the try block it will be caught and the code block in the except block will be executed.
b = int(input("Please enter a number to divide a"))
a = a/b
except ZeroDivisionError: # A specific try except allows you to catch certain exceptions and also execute certain code depending on the exception
print("The number you provided can't divide 1 because it is 0")
except ValueError:
print("You did not provide a number")
except: # You can also have an empty except at the end to catch an unexpected exception:
print("Something went wrong")
else: # Else allows one to check if there was no exception when executing the try block. This is useful when we want to execute something only if there were no errors.
print("success a=",a)
finally: # Finally allows us to always execute something even if there is an exception or not. This is usually used to signify the end of the try except
print("Processing Complete")
#THE USE OF CONTINUE IN A LOOP
count = 0
sum = 0
while True:
inp = input("Enter a number ('quit' to interrupt): ")
try:
x = int(inp)
sum = sum + x
count = count + 1
print("Step: ",count,"Sum: ",sum)
except:
if inp == "quit":
break
else:
print("Not a number!")
continue
print("Have a good day !")
#LOOP IDIOMS
#FINDING THE LARGEST VALUE
largest_so_far = None
print("Before",largest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if largest_so_far is None or largest_so_far < num:
largest_so_far = num
print(largest_so_far,num)
print("After",largest_so_far)
#FINDING THE SMALLEST VALUE
smallest_so_far = None
print("Before",smallest_so_far)
for num in [89,34,3,5,34,65,23,54,234,54,345]:
if smallest_so_far is None:
smallest_so_far = num
elif smallest_so_far > num:
smallest_so_far = num
print(smallest_so_far,num)
print("After",smallest_so_far)
#COUTING IN A LOOP
friends = ["Robert","Fred","John","Paul","Jean","Marie","Anna"]
count = 0
for friend in friends:
count = count + 1
print(count,friend)
#SUMMING IN A LOOP
scores = [4,5,67,54,56,78,7]
total = 0
count = 0
for score in scores:
total = total + score
count = count + 1
print("After",count,"Total is",total)
print("Average is",total/count)
#SEARCH USING A BOOLEAN VARIABLE
found = False
print("Before",found)
for value in[56,4,67,34,2,34,54,66,77]:
if value == 34:
found = True
print(found,value)
print("After",found)
#LISTS are mutable
Lists = [1, | print(type(aa)) # impression du typage
bb = float(aa) # to float
print(bb)
print(type(bb)) | random_line_split |
|
lib.rs | #[derive(Debug, Eq, PartialEq)]
/// struct Submarine(usize);
///
/// assert!(topo::Env::get::<Submarine>().is_none());
///
/// topo::call!({
/// assert_eq!(&Submarine(1), &*topo::Env::get::<Submarine>().unwrap());
///
/// topo::call!({
/// assert_eq!(&Submarine(2), &*topo::Env::get::<Submarine>().unwrap());
/// }, env! {
/// Submarine => Submarine(2),
/// });
///
/// assert_eq!(&Submarine(1), &*topo::Env::get::<Submarine>().unwrap());
/// }, env! {
/// Submarine => Submarine(1),
/// });
///
/// assert!(topo::Env::get::<Submarine>().is_none());
/// ```
#[macro_export]
macro_rules! call {
($($input:tt)*) => {{
$crate::unstable_raw_call!(is_root: false, call: $($input)*)
}}
}
/// Roots a topology at a particular callsite while calling the provided expression with the same
/// convention as [`call`].
///
/// Normally, when a topological function is repeatedly bound to the same callsite in a loop,
/// each invocation receives a different [`Id`], as these invocations model siblings in the
/// topology. The overall goal of this crate, however, is to provide imperative codepaths with
/// stable identifiers *across* executions at the same callsite. In practice, we must have a root
/// to the subtopology we are maintaining across these impure calls, and after each execution of the
/// subtopology it must reset the state at its [`Id`] so that the next execution of the root
/// is bound to the same point at its parent as its previous execution was. This is...an opaque
/// explanation at best and TODO revise it.
///
/// In this first example, a scope containing the loop can observe each separate loop
/// iteration mutating `count` and the root closure mutating `exit`. The variables `root_ids` and
/// `child_ids` observe the identifiers of the
///
/// ```
/// # use topo::{self, *};
/// # use std::collections::{HashMap, HashSet};
/// struct LoopCount(usize);
///
/// let mut count = 0;
/// let mut exit = false;
/// let mut root_ids = HashSet::new();
/// let mut child_ids = HashMap::new();
/// while !exit {
/// count += 1;
/// topo::root!({
/// root_ids.insert(topo::Id::current());
/// assert_eq!(
/// root_ids.len(),
/// 1,
/// "the Id of this scope should be repeated, not incremented"
/// );
///
/// let outer_count = topo::Env::get::<LoopCount>().unwrap().0;
/// assert!(outer_count <= 10);
/// if outer_count == 10 {
/// exit = true;
/// }
///
/// for i in 0..10 {
/// topo::call!({
/// let current_id = topo::Id::current();
/// if outer_count > 1 {
/// assert_eq!(child_ids[&i], current_id);
/// }
/// child_ids.insert(i, current_id);
/// assert!(
/// child_ids.len() <= 10,
/// "only 10 children should be observed across all loop iterations",
/// );
/// });
/// }
/// assert_eq!(child_ids.len(), 10);
/// }, env! {
/// LoopCount => LoopCount(count),
/// });
/// assert_eq!(child_ids.len(), 10);
/// assert_eq!(root_ids.len(), 1);
/// }
/// ```
#[macro_export]
macro_rules! root {
($($input:tt)*) => {{
$crate::unstable_raw_call!(is_root: true, call: $($input)*)
}}
}
#[doc(hidden)]
#[macro_export]
macro_rules! unstable_raw_call {
(is_root: $is_root:expr, call: $inner:expr $(, env! { $($env:tt)* })?) => {{
struct UwuDaddyRustcGibUniqueTypeIdPlsPls; // thanks for the great name idea, cjm00!
#[allow(unused_mut)]
let mut _new_env = Default::default();
$( _new_env = $crate::env! { $($env)* }; )?
let _reset_to_parent_on_drop_pls = $crate::Point::unstable_pin_prev_enter_child(
std::any::TypeId::of::<UwuDaddyRustcGibUniqueTypeIdPlsPls>(),
_new_env,
$is_root
);
$inner
}};
}
/// Identifies an activation record in the call topology. This is implemented approximately similar
/// to the [hash cons][cons] of preceding topological function invocations' `Id`s.
///
/// TODO explore analogies to instruction and stack pointers?
/// TODO explore more efficient implementations by piggybacking on those?
///
/// [cons]: https://en.wikipedia.org/wiki/Hash_consing
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Id(u64);
impl Id {
/// Returns the `Id` for the current scope in the call topology.
pub fn current() -> Self {
fn assert_send_and_sync<T>()
where
T: Send + Sync,
{
}
assert_send_and_sync::<Id>();
CURRENT_POINT.with(|p| p.borrow().id)
}
}
/// The root of a sub-graph within the overall topology formed at runtime by the call-graph of
/// topological functions.
///
/// The current `Point` contains the local [`Env`], [`Id`], and some additional internal state to
/// uniquely identify each child topological function invocations.
#[derive(Debug)]
pub struct Point {
id: Id,
state: State,
}
thread_local! {
/// The `Point` representing the current dynamic scope.
static CURRENT_POINT: RefCell<Point> = Default::default();
}
impl Point {
/// "Root" a new child [`Point`]. When the guard returned from this function is dropped, the
/// parent point is restored as the "current" `Point`. By calling provided code while the
/// returned guard is live on the stack, we create the tree of indices and environments that
/// correspond to the topological call tree, exiting the child context when the rooted scope
/// ends.
#[doc(hidden)]
pub fn unstable_pin_prev_enter_child(
callsite_ty: TypeId,
add_env: EnvInner,
reset_on_drop: bool,
) -> impl Drop {
CURRENT_POINT.with(|parent| {
let mut parent = parent.borrow_mut();
// this must be copied *before* creating the child below, which will mutate the state
let parent_initial_state = parent.state.clone();
let child = if reset_on_drop {
let mut root = Point::default();
root.state = root.state.child(Callsite::new(callsite_ty, &None), add_env);
root
} else {
parent.child(callsite_ty, add_env)
};
let parent = replace(&mut *parent, child);
scopeguard::guard(
(parent_initial_state, parent),
move |(prev_initial_state, mut prev)| {
if reset_on_drop {
prev.state = prev_initial_state;
}
CURRENT_POINT.with(|p| p.replace(prev));
},
)
})
}
/// Mark a child Point in the topology.
fn child(&mut self, callsite_ty: TypeId, additional: EnvInner) -> Self {
let callsite = Callsite::new(callsite_ty, &self.state.last_child);
let mut hasher = DefaultHasher::new();
self.id.hash(&mut hasher);
self.state.child_count.hash(&mut hasher);
callsite.hash(&mut hasher);
let id = Id(hasher.finish());
Self {
id,
state: self.state.child(callsite, additional),
}
}
/// Runs the provided closure with access to the current [`Point`].
fn with_current<Out>(op: impl FnOnce(&Point) -> Out) -> Out {
CURRENT_POINT.with(|p| op(&*p.borrow()))
}
}
impl Default for Point {
fn default() -> Self {
Self {
id: Id(0),
state: Default::default(),
}
}
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
#[derive(Clone, Debug, Default)]
struct State {
/// The callsite most recently bound to this one as a child.
last_child: Option<Callsite>,
/// The number of children currently bound to this `Point`.
child_count: u16,
/// The current environment.
env: Env,
}
impl State {
fn child(&mut self, callsite: Callsite, additional: EnvInner) -> Self {
self.last_child = Some(callsite);
self.child_count += 1;
Self {
last_child: None,
child_count: 0,
env: self.env.child(additional),
}
}
}
| ty: TypeId,
count: usize,
}
impl |
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct Callsite {
| random_line_split |
lib.rs | current scope in the call topology.
pub fn current() -> Self {
fn assert_send_and_sync<T>()
where
T: Send + Sync,
{
}
assert_send_and_sync::<Id>();
CURRENT_POINT.with(|p| p.borrow().id)
}
}
/// The root of a sub-graph within the overall topology formed at runtime by the call-graph of
/// topological functions.
///
/// The current `Point` contains the local [`Env`], [`Id`], and some additional internal state to
/// uniquely identify each child topological function invocations.
#[derive(Debug)]
pub struct Point {
id: Id,
state: State,
}
thread_local! {
/// The `Point` representing the current dynamic scope.
static CURRENT_POINT: RefCell<Point> = Default::default();
}
impl Point {
/// "Root" a new child [`Point`]. When the guard returned from this function is dropped, the
/// parent point is restored as the "current" `Point`. By calling provided code while the
/// returned guard is live on the stack, we create the tree of indices and environments that
/// correspond to the topological call tree, exiting the child context when the rooted scope
/// ends.
#[doc(hidden)]
pub fn unstable_pin_prev_enter_child(
callsite_ty: TypeId,
add_env: EnvInner,
reset_on_drop: bool,
) -> impl Drop {
CURRENT_POINT.with(|parent| {
let mut parent = parent.borrow_mut();
// this must be copied *before* creating the child below, which will mutate the state
let parent_initial_state = parent.state.clone();
let child = if reset_on_drop {
let mut root = Point::default();
root.state = root.state.child(Callsite::new(callsite_ty, &None), add_env);
root
} else {
parent.child(callsite_ty, add_env)
};
let parent = replace(&mut *parent, child);
scopeguard::guard(
(parent_initial_state, parent),
move |(prev_initial_state, mut prev)| {
if reset_on_drop {
prev.state = prev_initial_state;
}
CURRENT_POINT.with(|p| p.replace(prev));
},
)
})
}
/// Mark a child Point in the topology.
fn child(&mut self, callsite_ty: TypeId, additional: EnvInner) -> Self {
let callsite = Callsite::new(callsite_ty, &self.state.last_child);
let mut hasher = DefaultHasher::new();
self.id.hash(&mut hasher);
self.state.child_count.hash(&mut hasher);
callsite.hash(&mut hasher);
let id = Id(hasher.finish());
Self {
id,
state: self.state.child(callsite, additional),
}
}
/// Runs the provided closure with access to the current [`Point`].
fn with_current<Out>(op: impl FnOnce(&Point) -> Out) -> Out {
CURRENT_POINT.with(|p| op(&*p.borrow()))
}
}
impl Default for Point {
fn default() -> Self {
Self {
id: Id(0),
state: Default::default(),
}
}
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
#[derive(Clone, Debug, Default)]
struct State {
/// The callsite most recently bound to this one as a child.
last_child: Option<Callsite>,
/// The number of children currently bound to this `Point`.
child_count: u16,
/// The current environment.
env: Env,
}
impl State {
fn child(&mut self, callsite: Callsite, additional: EnvInner) -> Self {
self.last_child = Some(callsite);
self.child_count += 1;
Self {
last_child: None,
child_count: 0,
env: self.env.child(additional),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct Callsite {
ty: TypeId,
count: usize,
}
impl Callsite {
fn new(ty: TypeId, last_child: &Option<Callsite>) -> Self {
let prev_count = match last_child {
Some(ref prev) if prev.ty == ty => prev.count,
_ => 0,
};
Self {
ty,
count: prev_count + 1,
}
}
}
/// Immutable environment container for the current (sub)topology. Environment values can be
/// provided by parent topological invocations (currently just with [`call`] and
/// [`root`]), but child functions can only mutate their environment through interior
/// mutability.
///
/// The environment is type-indexed/type-directed, and each `Env` holds 0-1 instances
/// of every [`std::any::Any`]` + 'static` type. Access is provided through read-only references.
///
/// Aside: one interesting implication of the above is the ability to define "private scoped global
/// values" which are private to functions which are nonetheless propagating the values with
/// their control flow. This can be useful for runtimes to offer themselves execution-local values
/// in functions which are invoked by external code. It can also be severely abused, like any
/// implicit state, and should be used with caution.
#[derive(Clone, Debug, Default)]
pub struct Env {
inner: Rc<EnvInner>,
}
type EnvInner = Map<TypeId, Rc<dyn Any>>;
impl Env {
/// Returns a reference to a value in the current environment if it has been added to the
/// environment by parent/enclosing [`call`] invocations.
pub fn get<E>() -> Option<impl Deref<Target = E> + 'static>
where
E: Any + 'static,
{
Point::with_current(|current| {
current
.state
.env
.inner
.get(&TypeId::of::<E>())
.map(|guard| {
OwningRef::new(guard.to_owned()).map(|anon| anon.downcast_ref().unwrap())
})
})
}
/// Returns a reference to a value in the current environment, as [`Env::get`] does, but panics
/// if the value has not been set in the environment.
// TODO typename for debugging here would be v. nice
pub fn expect<E>() -> impl Deref<Target = E> + 'static
where
E: Any + 'static,
{
Self::get().expect("expected a value from the environment, found none")
}
fn child(&self, additional: EnvInner) -> Env {
let mut new: EnvInner = (*self.inner).to_owned();
new.extend(additional.iter().map(|(t, v)| (*t, v.clone())));
Env {
inner: Rc::new(new),
}
}
}
/// Defines a new macro (named after the first metavariable) which calls a function (named in
/// the second metavariable) in a `Point` specific to this callsite and its parents.
///
/// As a quirk of the `macro_rules!` parser, we have to "bring our own" metavariables for the new
/// macro's args and their expansion for the wrapped function. This makes for an awkward invocation,
/// but it's only invoked from the proc macro attribute for generating topological macros.
///
/// This is used to work around procedural macro hygiene restrictions, allowing us to "generate" a
/// macro from a procedural macro without needing to enable a (as of writing) unstable feature.
#[doc(hidden)]
#[macro_export]
macro_rules! unstable_make_topo_macro {
(
$name:ident $mangled_name:ident
match $matcher:tt
subst $pass:tt
doc ($($docs:tt)*)
) => {
$($docs)*
#[macro_export]
macro_rules! $name {
$matcher => {
topo::unstable_raw_call!(is_root: false, call: $mangled_name $pass)
};
}
};
}
/// Declare additional environment values to expose to a child topological function's call tree.
#[macro_export]
macro_rules! env {
($($env_item_ty:ty => $env_item:expr,)*) => {{
use std::collections::HashMap;
#[allow(unused_mut)]
let mut new_env = HashMap::new();
$({
use std::{
any::{Any, TypeId},
rc::Rc,
};
new_env.insert(
TypeId::of::<$env_item_ty>(),
Rc::new($env_item) as Rc<dyn Any>,
);
})*
new_env
}}
}
#[cfg(test)]
mod tests {
use super::{Env, Id};
#[test]
fn one_child_in_a_loop() {
let root = Id::current();
assert_eq!(root, Id::current());
let mut prev = root;
for _ in 0..100 {
let called;
call!({
let current = Id::current();
assert_ne!(prev, current, "each Id in this loop should be unique");
prev = current;
called = true;
});
// make sure we've returned to an expected baseline
assert_eq!(root, Id::current());
assert!(called);
}
}
#[test]
fn | call_env | identifier_name |
|
lib.rs | #[derive(Debug, Eq, PartialEq)]
/// struct Submarine(usize);
///
/// assert!(topo::Env::get::<Submarine>().is_none());
///
/// topo::call!({
/// assert_eq!(&Submarine(1), &*topo::Env::get::<Submarine>().unwrap());
///
/// topo::call!({
/// assert_eq!(&Submarine(2), &*topo::Env::get::<Submarine>().unwrap());
/// }, env! {
/// Submarine => Submarine(2),
/// });
///
/// assert_eq!(&Submarine(1), &*topo::Env::get::<Submarine>().unwrap());
/// }, env! {
/// Submarine => Submarine(1),
/// });
///
/// assert!(topo::Env::get::<Submarine>().is_none());
/// ```
#[macro_export]
macro_rules! call {
($($input:tt)*) => {{
$crate::unstable_raw_call!(is_root: false, call: $($input)*)
}}
}
/// Roots a topology at a particular callsite while calling the provided expression with the same
/// convention as [`call`].
///
/// Normally, when a topological function is repeatedly bound to the same callsite in a loop,
/// each invocation receives a different [`Id`], as these invocations model siblings in the
/// topology. The overall goal of this crate, however, is to provide imperative codepaths with
/// stable identifiers *across* executions at the same callsite. In practice, we must have a root
/// to the subtopology we are maintaining across these impure calls, and after each execution of the
/// subtopology it must reset the state at its [`Id`] so that the next execution of the root
/// is bound to the same point at its parent as its previous execution was. This is...an opaque
/// explanation at best and TODO revise it.
///
/// In this first example, a scope containing the loop can observe each separate loop
/// iteration mutating `count` and the root closure mutating `exit`. The variables `root_ids` and
/// `child_ids` observe the identifiers of the
///
/// ```
/// # use topo::{self, *};
/// # use std::collections::{HashMap, HashSet};
/// struct LoopCount(usize);
///
/// let mut count = 0;
/// let mut exit = false;
/// let mut root_ids = HashSet::new();
/// let mut child_ids = HashMap::new();
/// while !exit {
/// count += 1;
/// topo::root!({
/// root_ids.insert(topo::Id::current());
/// assert_eq!(
/// root_ids.len(),
/// 1,
/// "the Id of this scope should be repeated, not incremented"
/// );
///
/// let outer_count = topo::Env::get::<LoopCount>().unwrap().0;
/// assert!(outer_count <= 10);
/// if outer_count == 10 {
/// exit = true;
/// }
///
/// for i in 0..10 {
/// topo::call!({
/// let current_id = topo::Id::current();
/// if outer_count > 1 {
/// assert_eq!(child_ids[&i], current_id);
/// }
/// child_ids.insert(i, current_id);
/// assert!(
/// child_ids.len() <= 10,
/// "only 10 children should be observed across all loop iterations",
/// );
/// });
/// }
/// assert_eq!(child_ids.len(), 10);
/// }, env! {
/// LoopCount => LoopCount(count),
/// });
/// assert_eq!(child_ids.len(), 10);
/// assert_eq!(root_ids.len(), 1);
/// }
/// ```
#[macro_export]
macro_rules! root {
($($input:tt)*) => {{
$crate::unstable_raw_call!(is_root: true, call: $($input)*)
}}
}
#[doc(hidden)]
#[macro_export]
macro_rules! unstable_raw_call {
(is_root: $is_root:expr, call: $inner:expr $(, env! { $($env:tt)* })?) => {{
struct UwuDaddyRustcGibUniqueTypeIdPlsPls; // thanks for the great name idea, cjm00!
#[allow(unused_mut)]
let mut _new_env = Default::default();
$( _new_env = $crate::env! { $($env)* }; )?
let _reset_to_parent_on_drop_pls = $crate::Point::unstable_pin_prev_enter_child(
std::any::TypeId::of::<UwuDaddyRustcGibUniqueTypeIdPlsPls>(),
_new_env,
$is_root
);
$inner
}};
}
/// Identifies an activation record in the call topology. This is implemented approximately similar
/// to the [hash cons][cons] of preceding topological function invocations' `Id`s.
///
/// TODO explore analogies to instruction and stack pointers?
/// TODO explore more efficient implementations by piggybacking on those?
///
/// [cons]: https://en.wikipedia.org/wiki/Hash_consing
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Id(u64);
impl Id {
/// Returns the `Id` for the current scope in the call topology.
pub fn current() -> Self {
fn assert_send_and_sync<T>()
where
T: Send + Sync,
{
}
assert_send_and_sync::<Id>();
CURRENT_POINT.with(|p| p.borrow().id)
}
}
/// The root of a sub-graph within the overall topology formed at runtime by the call-graph of
/// topological functions.
///
/// The current `Point` contains the local [`Env`], [`Id`], and some additional internal state to
/// uniquely identify each child topological function invocations.
#[derive(Debug)]
pub struct Point {
id: Id,
state: State,
}
thread_local! {
/// The `Point` representing the current dynamic scope.
static CURRENT_POINT: RefCell<Point> = Default::default();
}
impl Point {
/// "Root" a new child [`Point`]. When the guard returned from this function is dropped, the
/// parent point is restored as the "current" `Point`. By calling provided code while the
/// returned guard is live on the stack, we create the tree of indices and environments that
/// correspond to the topological call tree, exiting the child context when the rooted scope
/// ends.
#[doc(hidden)]
pub fn unstable_pin_prev_enter_child(
callsite_ty: TypeId,
add_env: EnvInner,
reset_on_drop: bool,
) -> impl Drop {
CURRENT_POINT.with(|parent| {
let mut parent = parent.borrow_mut();
// this must be copied *before* creating the child below, which will mutate the state
let parent_initial_state = parent.state.clone();
let child = if reset_on_drop {
let mut root = Point::default();
root.state = root.state.child(Callsite::new(callsite_ty, &None), add_env);
root
} else | ;
let parent = replace(&mut *parent, child);
scopeguard::guard(
(parent_initial_state, parent),
move |(prev_initial_state, mut prev)| {
if reset_on_drop {
prev.state = prev_initial_state;
}
CURRENT_POINT.with(|p| p.replace(prev));
},
)
})
}
/// Mark a child Point in the topology.
fn child(&mut self, callsite_ty: TypeId, additional: EnvInner) -> Self {
let callsite = Callsite::new(callsite_ty, &self.state.last_child);
let mut hasher = DefaultHasher::new();
self.id.hash(&mut hasher);
self.state.child_count.hash(&mut hasher);
callsite.hash(&mut hasher);
let id = Id(hasher.finish());
Self {
id,
state: self.state.child(callsite, additional),
}
}
/// Runs the provided closure with access to the current [`Point`].
fn with_current<Out>(op: impl FnOnce(&Point) -> Out) -> Out {
CURRENT_POINT.with(|p| op(&*p.borrow()))
}
}
impl Default for Point {
fn default() -> Self {
Self {
id: Id(0),
state: Default::default(),
}
}
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
#[derive(Clone, Debug, Default)]
struct State {
/// The callsite most recently bound to this one as a child.
last_child: Option<Callsite>,
/// The number of children currently bound to this `Point`.
child_count: u16,
/// The current environment.
env: Env,
}
impl State {
fn child(&mut self, callsite: Callsite, additional: EnvInner) -> Self {
self.last_child = Some(callsite);
self.child_count += 1;
Self {
last_child: None,
child_count: 0,
env: self.env.child(additional),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct Callsite {
ty: TypeId,
count: usize,
}
impl Calls | {
parent.child(callsite_ty, add_env)
} | conditional_block |
lib.rs | The variables `root_ids` and
/// `child_ids` observe the identifiers of the
///
/// ```
/// # use topo::{self, *};
/// # use std::collections::{HashMap, HashSet};
/// struct LoopCount(usize);
///
/// let mut count = 0;
/// let mut exit = false;
/// let mut root_ids = HashSet::new();
/// let mut child_ids = HashMap::new();
/// while !exit {
/// count += 1;
/// topo::root!({
/// root_ids.insert(topo::Id::current());
/// assert_eq!(
/// root_ids.len(),
/// 1,
/// "the Id of this scope should be repeated, not incremented"
/// );
///
/// let outer_count = topo::Env::get::<LoopCount>().unwrap().0;
/// assert!(outer_count <= 10);
/// if outer_count == 10 {
/// exit = true;
/// }
///
/// for i in 0..10 {
/// topo::call!({
/// let current_id = topo::Id::current();
/// if outer_count > 1 {
/// assert_eq!(child_ids[&i], current_id);
/// }
/// child_ids.insert(i, current_id);
/// assert!(
/// child_ids.len() <= 10,
/// "only 10 children should be observed across all loop iterations",
/// );
/// });
/// }
/// assert_eq!(child_ids.len(), 10);
/// }, env! {
/// LoopCount => LoopCount(count),
/// });
/// assert_eq!(child_ids.len(), 10);
/// assert_eq!(root_ids.len(), 1);
/// }
/// ```
#[macro_export]
macro_rules! root {
($($input:tt)*) => {{
$crate::unstable_raw_call!(is_root: true, call: $($input)*)
}}
}
#[doc(hidden)]
#[macro_export]
macro_rules! unstable_raw_call {
(is_root: $is_root:expr, call: $inner:expr $(, env! { $($env:tt)* })?) => {{
struct UwuDaddyRustcGibUniqueTypeIdPlsPls; // thanks for the great name idea, cjm00!
#[allow(unused_mut)]
let mut _new_env = Default::default();
$( _new_env = $crate::env! { $($env)* }; )?
let _reset_to_parent_on_drop_pls = $crate::Point::unstable_pin_prev_enter_child(
std::any::TypeId::of::<UwuDaddyRustcGibUniqueTypeIdPlsPls>(),
_new_env,
$is_root
);
$inner
}};
}
/// Identifies an activation record in the call topology. This is implemented approximately similar
/// to the [hash cons][cons] of preceding topological function invocations' `Id`s.
///
/// TODO explore analogies to instruction and stack pointers?
/// TODO explore more efficient implementations by piggybacking on those?
///
/// [cons]: https://en.wikipedia.org/wiki/Hash_consing
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Id(u64);
impl Id {
/// Returns the `Id` for the current scope in the call topology.
pub fn current() -> Self {
fn assert_send_and_sync<T>()
where
T: Send + Sync,
{
}
assert_send_and_sync::<Id>();
CURRENT_POINT.with(|p| p.borrow().id)
}
}
/// The root of a sub-graph within the overall topology formed at runtime by the call-graph of
/// topological functions.
///
/// The current `Point` contains the local [`Env`], [`Id`], and some additional internal state to
/// uniquely identify each child topological function invocations.
#[derive(Debug)]
pub struct Point {
id: Id,
state: State,
}
thread_local! {
/// The `Point` representing the current dynamic scope.
static CURRENT_POINT: RefCell<Point> = Default::default();
}
impl Point {
/// "Root" a new child [`Point`]. When the guard returned from this function is dropped, the
/// parent point is restored as the "current" `Point`. By calling provided code while the
/// returned guard is live on the stack, we create the tree of indices and environments that
/// correspond to the topological call tree, exiting the child context when the rooted scope
/// ends.
#[doc(hidden)]
pub fn unstable_pin_prev_enter_child(
callsite_ty: TypeId,
add_env: EnvInner,
reset_on_drop: bool,
) -> impl Drop {
CURRENT_POINT.with(|parent| {
let mut parent = parent.borrow_mut();
// this must be copied *before* creating the child below, which will mutate the state
let parent_initial_state = parent.state.clone();
let child = if reset_on_drop {
let mut root = Point::default();
root.state = root.state.child(Callsite::new(callsite_ty, &None), add_env);
root
} else {
parent.child(callsite_ty, add_env)
};
let parent = replace(&mut *parent, child);
scopeguard::guard(
(parent_initial_state, parent),
move |(prev_initial_state, mut prev)| {
if reset_on_drop {
prev.state = prev_initial_state;
}
CURRENT_POINT.with(|p| p.replace(prev));
},
)
})
}
/// Mark a child Point in the topology.
fn child(&mut self, callsite_ty: TypeId, additional: EnvInner) -> Self {
let callsite = Callsite::new(callsite_ty, &self.state.last_child);
let mut hasher = DefaultHasher::new();
self.id.hash(&mut hasher);
self.state.child_count.hash(&mut hasher);
callsite.hash(&mut hasher);
let id = Id(hasher.finish());
Self {
id,
state: self.state.child(callsite, additional),
}
}
/// Runs the provided closure with access to the current [`Point`].
fn with_current<Out>(op: impl FnOnce(&Point) -> Out) -> Out {
CURRENT_POINT.with(|p| op(&*p.borrow()))
}
}
impl Default for Point {
fn default() -> Self {
Self {
id: Id(0),
state: Default::default(),
}
}
}
impl PartialEq for Point {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
#[derive(Clone, Debug, Default)]
struct State {
/// The callsite most recently bound to this one as a child.
last_child: Option<Callsite>,
/// The number of children currently bound to this `Point`.
child_count: u16,
/// The current environment.
env: Env,
}
impl State {
fn child(&mut self, callsite: Callsite, additional: EnvInner) -> Self {
self.last_child = Some(callsite);
self.child_count += 1;
Self {
last_child: None,
child_count: 0,
env: self.env.child(additional),
}
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct Callsite {
ty: TypeId,
count: usize,
}
impl Callsite {
fn new(ty: TypeId, last_child: &Option<Callsite>) -> Self {
let prev_count = match last_child {
Some(ref prev) if prev.ty == ty => prev.count,
_ => 0,
};
Self {
ty,
count: prev_count + 1,
}
}
}
/// Immutable environment container for the current (sub)topology. Environment values can be
/// provided by parent topological invocations (currently just with [`call`] and
/// [`root`]), but child functions can only mutate their environment through interior
/// mutability.
///
/// The environment is type-indexed/type-directed, and each `Env` holds 0-1 instances
/// of every [`std::any::Any`]` + 'static` type. Access is provided through read-only references.
///
/// Aside: one interesting implication of the above is the ability to define "private scoped global
/// values" which are private to functions which are nonetheless propagating the values with
/// their control flow. This can be useful for runtimes to offer themselves execution-local values
/// in functions which are invoked by external code. It can also be severely abused, like any
/// implicit state, and should be used with caution.
#[derive(Clone, Debug, Default)]
pub struct Env {
inner: Rc<EnvInner>,
}
type EnvInner = Map<TypeId, Rc<dyn Any>>;
impl Env {
/// Returns a reference to a value in the current environment if it has been added to the
/// environment by parent/enclosing [`call`] invocations.
pub fn get<E>() -> Option<impl Deref<Target = E> + 'static>
where
E: Any + 'static,
| {
Point::with_current(|current| {
current
.state
.env
.inner
.get(&TypeId::of::<E>())
.map(|guard| {
OwningRef::new(guard.to_owned()).map(|anon| anon.downcast_ref().unwrap())
})
})
} | identifier_body |
|
lib.rs | like they each contain a single number; the bots
//! must use some logic to decide what to do with each chip. You access the local control
//! computer and download the bots' instructions (your puzzle input).
//!
//! Some of the instructions specify that a specific-valued microchip should be given to a
//! specific bot; the rest of the instructions indicate what a given bot should do with its
//! lower-value or higher-value chip.
//!
//! For example, consider the following instructions:
//!
//! ```notrust
//! value 5 goes to bot 2
//! bot 2 gives low to bot 1 and high to bot 0
//! value 3 goes to bot 1
//! bot 1 gives low to output 1 and high to bot 0
//! bot 0 gives low to output 2 and high to output 0
//! value 2 goes to bot 2
//! ```
//!
//! - Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and
//! a value-5 chip.
//! - Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher
//! one (5) to bot 0.
//! - Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn | (bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
}
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else {
Ok(())
}
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get( | transfer | identifier_name |
lib.rs | they each contain a single number; the bots
//! must use some logic to decide what to do with each chip. You access the local control
//! computer and download the bots' instructions (your puzzle input).
//!
//! Some of the instructions specify that a specific-valued microchip should be given to a
//! specific bot; the rest of the instructions indicate what a given bot should do with its
//! lower-value or higher-value chip.
//!
//! For example, consider the following instructions:
//!
//! ```notrust
//! value 5 goes to bot 2
//! bot 2 gives low to bot 1 and high to bot 0
//! value 3 goes to bot 1
//! bot 1 gives low to output 1 and high to bot 0
//! bot 0 gives low to output 2 and high to output 0
//! value 2 goes to bot 2
//! ```
//!
//! - Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and
//! a value-5 chip.
//! - Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher
//! one (5) to bot 0.
//! - Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn transfer(bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
}
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else |
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get( | {
Ok(())
} | conditional_block |
lib.rs | it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn transfer(bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
}
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else {
Ok(())
}
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get(2, 5),
Instruction::transfer(2, Receiver::Bot(1), Receiver::Bot(0)),
Instruction::get(1, 3),
Instruction::transfer(1, Receiver::Output(1), Receiver::Bot(0)),
Instruction::transfer(0, Receiver::Output(2), Receiver::Output(0)),
Instruction::get(2, 2),
];
#[test]
fn test_expected() {
let expected_outputs = hashmap! {
0 => 5,
1 => 2,
2 => 3,
};
let (bots, outputs) = process(EXAMPLE_INSTRUCTIONS).unwrap();
println!("Bots:");
for bot in bots.values() {
println!(" {:?}", bot);
}
println!("Outputs: {:?}", outputs);
assert!(outputs == expected_outputs);
assert_eq!(find_bot_handling(&bots, 5, 2).unwrap(), 2);
}
#[test]
fn test_parse() {
for (raw, parsed) in EXAMPLE_INSTRUCTIONS_STR
.iter()
.zip(EXAMPLE_INSTRUCTIONS.iter())
{
println!("Parsing '{}'; expecting {:?}", raw, parsed);
let got = raw.parse::<Instruction>().unwrap(); | assert_eq!(got, *parsed);
}
}
} | random_line_split |
|
lib.rs | they each contain a single number; the bots
//! must use some logic to decide what to do with each chip. You access the local control
//! computer and download the bots' instructions (your puzzle input).
//!
//! Some of the instructions specify that a specific-valued microchip should be given to a
//! specific bot; the rest of the instructions indicate what a given bot should do with its
//! lower-value or higher-value chip.
//!
//! For example, consider the following instructions:
//!
//! ```notrust
//! value 5 goes to bot 2
//! bot 2 gives low to bot 1 and high to bot 0
//! value 3 goes to bot 1
//! bot 1 gives low to output 1 and high to bot 0
//! bot 0 gives low to output 2 and high to output 0
//! value 2 goes to bot 2
//! ```
//!
//! - Initially, bot 1 starts with a value-3 chip, and bot 2 starts with a value-2 chip and
//! a value-5 chip.
//! - Because bot 2 has two microchips, it gives its lower one (2) to bot 1 and its higher
//! one (5) to bot 0.
//! - Then, bot 1 has two microchips; it puts the value-2 chip in output 1 and gives the
//! value-3 chip to bot 0.
//! - Finally, bot 0 has two microchips; it puts the 3 in output 2 and the 5 in output 0.
//!
//! In the end, output bin 0 contains a value-5 microchip, output bin 1 contains a value-2
//! microchip, and output bin 2 contains a value-3 microchip. In this configuration, bot
//! number 2 is responsible for comparing value-5 microchips with value-2 microchips.
//!
//! Based on your instructions, what is the number of the bot that is responsible for
//! comparing value-61 microchips with value-17 microchips?
use aoclib::parse;
use std::{
array,
collections::{hash_map::Entry, HashMap, VecDeque},
path::Path,
};
// These typedefs aren't type-safe with each other, but they still
// make it easier to read the code.
pub type Id = u32;
pub type Value = u32;
pub type Bots = HashMap<Id, Bot>;
pub type Outputs = HashMap<Id, Value>;
#[derive(Debug)]
pub struct Output(Id);
#[derive(Debug, Default, Clone)]
pub struct Bot {
pub id: Id,
low: Option<Value>,
high: Option<Value>,
}
impl Bot {
pub fn new(id: Id) -> Bot {
Bot {
id,
..Bot::default()
}
}
/// True if bot has two values
pub fn is_full(&self) -> bool {
self.low.is_some() && self.high.is_some()
}
/// Add a result to this bot, or error if it's full
pub fn add_value(&mut self, mut value: Value) -> Result<(), Error> {
if let Some(mut low) = self.low.take() {
if low > value {
std::mem::swap(&mut low, &mut value);
}
self.low = Some(low);
self.high = Some(value);
} else {
self.low = Some(value);
}
Ok(())
}
}
/// A Receiver is a Bot or an Output: it can receive items.
///
/// In either case, it contains the ID of the destination item
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Receiver {
#[display("bot {0}")]
Bot(Id),
#[display("output {0}")]
Output(Id),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, parse_display::FromStr, parse_display::Display)]
pub enum Instruction {
#[display("value {value} goes to bot {bot_id}")]
Get { bot_id: Id, value: Value },
#[display("bot {bot_id} gives low to {low_dest} and high to {high_dest}")]
Transfer {
bot_id: Id,
low_dest: Receiver,
high_dest: Receiver,
},
}
impl Instruction {
pub const fn get(bot_id: Id, value: Value) -> Instruction {
Instruction::Get { bot_id, value }
}
pub const fn transfer(bot_id: Id, low_dest: Receiver, high_dest: Receiver) -> Instruction |
}
/// Process a list of instructions.
///
/// Be careful--there's no guard currently in place against an incomplete list of instructions
/// leading to an infinite loop.
pub fn process(instructions: &[Instruction]) -> Result<(Bots, Outputs), Error> {
let mut bots = Bots::new();
let mut outputs = Outputs::new();
// convert to double-ended queue
let mut instructions: VecDeque<Instruction> = instructions.iter().copied().collect();
while let Some(instruction) = instructions.pop_front() {
match instruction {
Instruction::Get { value, bot_id } => bots
.entry(bot_id)
.or_insert_with(|| Bot::new(bot_id))
.add_value(value)?,
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
} => {
// clone the bot here to avoid mutable-immutable borrow issues
// bots are small; this is cheap
if let Some(Bot {
low: Some(low),
high: Some(high),
..
}) = bots.get(&bot_id).cloned()
{
// transfer instruction and bot is full
let mut give_to_receiver = |value, receiver| match receiver {
Receiver::Bot(id) => bots
.entry(id)
.or_insert_with(|| Bot::new(id))
.add_value(value),
Receiver::Output(id) => match outputs.entry(id) {
Entry::Occupied(entry) => {
// it's an error to put two different values into the same output
if *entry.get() != value {
Err(Error::OutputInsert(id, *entry.get(), value))
} else {
Ok(())
}
}
Entry::Vacant(entry) => {
entry.insert(value);
Ok(())
}
},
};
give_to_receiver(low, low_dest)?;
give_to_receiver(high, high_dest)?;
} else {
// bot is not found or not full; try again later
instructions.push_back(Instruction::transfer(bot_id, low_dest, high_dest));
}
}
}
}
Ok((bots, outputs))
}
/// Return the bot ID which handles the specified values
pub fn find_bot_handling(bots: &Bots, mut low: Value, mut high: Value) -> Result<Id, Error> {
// ensure v1 <= v2 for simpler comparisons
if low > high {
std::mem::swap(&mut low, &mut high);
}
bots.values()
.find(|bot| bot.low == Some(low) && bot.high == Some(high))
.map(|bot| bot.id)
.ok_or(Error::NoBotFound(low, high))
}
pub fn part1(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (bots, _) = process(&instructions)?;
let bot = find_bot_handling(&bots, 61, 17)?;
println!("Bot handling (61, 17): {}", bot);
Ok(())
}
pub fn part2(path: &Path) -> Result<(), Error> {
let instructions: Vec<Instruction> = parse(path)?.collect();
let (_, outputs) = process(&instructions)?;
let chips = array::IntoIter::new([0, 1, 2])
.map(|id| outputs.get(&id).ok_or(Error::NoChipFound(id)))
.collect::<Result<Vec<_>, _>>()?;
let chip_product: Value = chips.into_iter().product();
println!("Product of chips (0, 1, 2): {}", chip_product);
Ok(())
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("bot {1} is full but attempted to insert {0}")]
BotInsert(Value, Id),
#[error("could not find bot handling ({0}, {1})")]
NoBotFound(Value, Value),
#[error("output {0} contains {1} but attempted to insert {2}")]
OutputInsert(Id, Value, Value),
#[error("could not find a chip output {0}")]
NoChipFound(Id),
}
#[cfg(test)]
mod tests {
use super::*;
use maplit::hashmap;
const EXAMPLE_INSTRUCTIONS_STR: &[&str] = &[
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
];
const EXAMPLE_INSTRUCTIONS: &[Instruction] = &[
Instruction::get( | {
Instruction::Transfer {
bot_id,
low_dest,
high_dest,
}
} | identifier_body |
detatt_mk.py | Cov matrix will be full-rank
#used when the data are centered (because one entry is linear combination
# of others)
#
#ns is the length of the spatial dimension (default is 1)
#assumes order s=1,t=1; s=1,t=2;...; s=2,t=1; s=2,t=2;...
# if grouped by time instead, change order to 'C' (only if no missing data)
#
#if any spatial dimensions are missing time steps, supply ind with list of
#number of time steps for each spatial dimensions
#
#from Ribes code and ECOF code
import numpy as np
from scipy import linalg
n1 = np.size(y) #assumes y is a single column of data (or row, I guess)
if np.mod(float(n1),ns)!= 0:
if ind == None:
raise ValueError('check dimensions or supply indices')
if ind == None:
n = int(n1/ns)
m = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
if ns==1:
xr = np.dot(p,x)
yr = np.dot(p,y)
cn1 = np.dot(p,noise1)
cn2 = np.dot(p,noise2)
else:
yr = np.dot(p,np.reshape(y,(-1,ns),order=order)).flatten(order=order)[:,None]
xr = np.zeros(((n-1)*ns,len(x[0,:])))
for j in range(len(x[0,:])):
xr[:,j] = np.dot(p,np.reshape(x[:,j],(-1,ns),order=order)).flatten(order=order)
cn1 = np.zeros(((n-1)*ns,len(noise1[0,:])))
for j in range(len(noise1[0,:])):
cn1[:,j] = np.dot(p,np.reshape(noise1[:,j],(-1,ns),order=order)).flatten(order=order)
cn2 = np.zeros(((n-1)*ns,len(noise2[0,:])))
for j in range(len(noise2[0,:])):
cn2[:,j] = np.dot(p,np.reshape(noise2[:,j],(-1,ns),order=order)).flatten(order=order)
else:
if len(ind) != ns:
raise ValueError('please provide nt for each spatial entry')
if order == 'C':
raise ValueError('can only handle missing data when grouped by spatial dimension')
if return_flag!=1:
raise ValueError('getting the p matrix probably will not be useful here')
yr = np.zeros((sum(ind)-ns))
xr = np.zeros((sum(ind)-ns,len(x[0,:])))
cn1 = np.zeros((sum(ind)-ns,len(noise1[0,:])))
cn2 = np.zeros((sum(ind)-ns,len(noise2[0,:])))
q = 0;w=0
for i in range(ns):
n = ind[i]
m = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
yr[q:q+n-1,:] = np.dot(p,y[w:w+n])[:,None]
if len(x[0,:])==1:
xr[q:q+n-1,:] = np.dot(p,x[w:w+n])[:,None]
else:
for j in range(len(x[0,:])):
xr[q:q+n-1,j] = np.dot(p,x[w:w+n,j])
for j in range(len(noise1[0,:])):
cn1[q:q+n-1,j] = np.dot(p,noise1[w:w+n,j])
for j in range(len(noise2[0,:])):
cn2[q:q+n-1,j] = np.dot(p,noise2[w:w+n,j])
w=w+n;q=q+n-1
if return_flag==0:
return (xr,yr,cn1,cn2)
else:
return p
#################################################################################
def rct_mc(C, x0, ne, n1, n2, nmc=10000, flag_plotden=0):
# following Ribes et al. (2013) and accompanying code, uses Monte Carlo
# approach for the residual consistency test
#
# input C is covariance matrix, x0 is response matrix, nmc is the number
# of Monte Carlo simulations, n1 and n2 are the dimensions of the
# climate noise matrices (from the control runs, probably), and ne is
# the number of ensemble members that went into each ensemble mean
#
# returns set of nmc estimates of the test statistic for the residual
# consistency test
import numpy as np
from scipy import linalg
n,m = np.shape(x0)
if np.size(np.shape(x0))>2:
|
if np.size(np.shape(C))>2:
raise ValueError('input C has too many dimensions')
if np.shape(C)[0]!=n or np.shape(C)[1]!=n:
raise ValueError('dimensions of C must be consistent with dimensions of x0')
#if ne is a single value, assume all signals have same ensemble size
# and expand ne to size m
if np.size(ne)==1:
ne = np.repeat(float(ne),m)
else:
ne = np.array(ne,dtype='float')
beta0 = np.ones((m,1))
c12 = np.real(linalg.sqrtm(C))
r2s = np.zeros((nmc,1))
for i in np.arange(nmc):
#simulate obs and response
y = np.dot(x0,beta0) + np.dot(c12,np.random.randn(n,1))
x = x0 + np.dot(c12,np.random.randn(n,m))/(np.ones((n,1))*np.sqrt(ne))
x1 = x * (np.ones((n,1))*np.sqrt(ne))
#simulate climate noise and calculate the reg cov matrix
cn1 = np.dot(c12,np.random.randn(n,n1))
cn2 = np.dot(c12,np.random.randn(n,n2))
c1reg = regC(cn1)
c1reg12 = np.real(linalg.inv(linalg.sqrtm(c1reg)))
#prewhiten with the reg cov matrix
xc = np.dot(c1reg12,x1)
yc = np.dot(c1reg12,y)
cn2c = np.dot(c1reg12,cn2)
#calculate test statistic for residual consistency test, using TLS
z = np.hstack((xc,yc))
u,s,v1 = linalg.svd(z)
#v = v1.T
lam = s**2
um = np.matrix(u[:,m]).T
r2s[i] = lam[-1]/(np.dot(np.dot(np.dot(um.T,cn2c),cn2c.T),um)/n2)
if flag_plotden==1:
import matplotlib.pyplot as plt
plt.figure
plt.hist(r2s, bins=30)
plt.show()
return r2s
#################################################################################
def tls(x,y,cn1,cn2,ne=1,rof_flag=1,CI_flag=1,alpha=0.10,flag_2S=0,flag_3S=0,flag_4S=0,error_flag=0,RCT_flag=1):
# returns the coefficients from a total least squares regression
# expects x and y to have column data (and should be centered)
#
# x is a nxm matrix of response data where n is time and m is the signal
# y is a nx1 array of the corresponding observations
# ne is the number of members that went into the calculation of the ensemble
# mean; should be of length m; if all m signals have same size ensemble
# a single input is allowed
# cn11 and cn22 are sections of the data (probably from a control run)
# used for calculating the covariance matrix; cn=climate noise
# cn1 is used for calculating the regularized covariance matrix cn2
# (does not need to be regularized) will be used for the residual
# consistency test
# rof_flag is a flag indicating whether to use the ROF approach (1)
# CI_flag is a flag indicating whether the confidence intervals of beta
# should be calculated; default is yes (1)
# alpha is the significance level used to construct the CI on beta
# flag_2S is a flag indicating whether to adjust the output for a combination
# of forcing signals (e.g., if 1 with inputs ALL+NAT, switch to ANT+NAT)
# error_flag determines if errors on x and y will be calculated/output
# RCT_flag is a flag indicating whether to perform the | raise ValueError('input x0 has too many dimensions') | conditional_block |
detatt_mk.py | = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
if ns==1:
xr = np.dot(p,x)
yr = np.dot(p,y)
cn1 = np.dot(p,noise1)
cn2 = np.dot(p,noise2)
else:
yr = np.dot(p,np.reshape(y,(-1,ns),order=order)).flatten(order=order)[:,None]
xr = np.zeros(((n-1)*ns,len(x[0,:])))
for j in range(len(x[0,:])):
xr[:,j] = np.dot(p,np.reshape(x[:,j],(-1,ns),order=order)).flatten(order=order)
cn1 = np.zeros(((n-1)*ns,len(noise1[0,:])))
for j in range(len(noise1[0,:])):
cn1[:,j] = np.dot(p,np.reshape(noise1[:,j],(-1,ns),order=order)).flatten(order=order)
cn2 = np.zeros(((n-1)*ns,len(noise2[0,:])))
for j in range(len(noise2[0,:])):
cn2[:,j] = np.dot(p,np.reshape(noise2[:,j],(-1,ns),order=order)).flatten(order=order)
else:
if len(ind) != ns:
raise ValueError('please provide nt for each spatial entry')
if order == 'C':
raise ValueError('can only handle missing data when grouped by spatial dimension')
if return_flag!=1:
raise ValueError('getting the p matrix probably will not be useful here')
yr = np.zeros((sum(ind)-ns))
xr = np.zeros((sum(ind)-ns,len(x[0,:])))
cn1 = np.zeros((sum(ind)-ns,len(noise1[0,:])))
cn2 = np.zeros((sum(ind)-ns,len(noise2[0,:])))
q = 0;w=0
for i in range(ns):
n = ind[i]
m = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
yr[q:q+n-1,:] = np.dot(p,y[w:w+n])[:,None]
if len(x[0,:])==1:
xr[q:q+n-1,:] = np.dot(p,x[w:w+n])[:,None]
else:
for j in range(len(x[0,:])):
xr[q:q+n-1,j] = np.dot(p,x[w:w+n,j])
for j in range(len(noise1[0,:])):
cn1[q:q+n-1,j] = np.dot(p,noise1[w:w+n,j])
for j in range(len(noise2[0,:])):
cn2[q:q+n-1,j] = np.dot(p,noise2[w:w+n,j])
w=w+n;q=q+n-1
if return_flag==0:
return (xr,yr,cn1,cn2)
else:
return p
#################################################################################
def rct_mc(C, x0, ne, n1, n2, nmc=10000, flag_plotden=0):
# following Ribes et al. (2013) and accompanying code, uses Monte Carlo
# approach for the residual consistency test
#
# input C is covariance matrix, x0 is response matrix, nmc is the number
# of Monte Carlo simulations, n1 and n2 are the dimensions of the
# climate noise matrices (from the control runs, probably), and ne is
# the number of ensemble members that went into each ensemble mean
#
# returns set of nmc estimates of the test statistic for the residual
# consistency test
import numpy as np
from scipy import linalg
n,m = np.shape(x0)
if np.size(np.shape(x0))>2:
raise ValueError('input x0 has too many dimensions')
if np.size(np.shape(C))>2:
raise ValueError('input C has too many dimensions')
if np.shape(C)[0]!=n or np.shape(C)[1]!=n:
raise ValueError('dimensions of C must be consistent with dimensions of x0')
#if ne is a single value, assume all signals have same ensemble size
# and expand ne to size m
if np.size(ne)==1:
ne = np.repeat(float(ne),m)
else:
ne = np.array(ne,dtype='float')
beta0 = np.ones((m,1))
c12 = np.real(linalg.sqrtm(C))
r2s = np.zeros((nmc,1))
for i in np.arange(nmc):
#simulate obs and response
y = np.dot(x0,beta0) + np.dot(c12,np.random.randn(n,1))
x = x0 + np.dot(c12,np.random.randn(n,m))/(np.ones((n,1))*np.sqrt(ne))
x1 = x * (np.ones((n,1))*np.sqrt(ne))
#simulate climate noise and calculate the reg cov matrix
cn1 = np.dot(c12,np.random.randn(n,n1))
cn2 = np.dot(c12,np.random.randn(n,n2))
c1reg = regC(cn1)
c1reg12 = np.real(linalg.inv(linalg.sqrtm(c1reg)))
#prewhiten with the reg cov matrix
xc = np.dot(c1reg12,x1)
yc = np.dot(c1reg12,y)
cn2c = np.dot(c1reg12,cn2)
#calculate test statistic for residual consistency test, using TLS
z = np.hstack((xc,yc))
u,s,v1 = linalg.svd(z)
#v = v1.T
lam = s**2
um = np.matrix(u[:,m]).T
r2s[i] = lam[-1]/(np.dot(np.dot(np.dot(um.T,cn2c),cn2c.T),um)/n2)
if flag_plotden==1:
import matplotlib.pyplot as plt
plt.figure
plt.hist(r2s, bins=30)
plt.show()
return r2s
#################################################################################
def tls(x,y,cn1,cn2,ne=1,rof_flag=1,CI_flag=1,alpha=0.10,flag_2S=0,flag_3S=0,flag_4S=0,error_flag=0,RCT_flag=1):
# returns the coefficients from a total least squares regression
# expects x and y to have column data (and should be centered)
#
# x is a nxm matrix of response data where n is time and m is the signal
# y is a nx1 array of the corresponding observations
# ne is the number of members that went into the calculation of the ensemble
# mean; should be of length m; if all m signals have same size ensemble
# a single input is allowed
# cn11 and cn22 are sections of the data (probably from a control run)
# used for calculating the covariance matrix; cn=climate noise
# cn1 is used for calculating the regularized covariance matrix cn2
# (does not need to be regularized) will be used for the residual
# consistency test
# rof_flag is a flag indicating whether to use the ROF approach (1)
# CI_flag is a flag indicating whether the confidence intervals of beta
# should be calculated; default is yes (1)
# alpha is the significance level used to construct the CI on beta
# flag_2S is a flag indicating whether to adjust the output for a combination
# of forcing signals (e.g., if 1 with inputs ALL+NAT, switch to ANT+NAT)
# error_flag determines if errors on x and y will be calculated/output
# RCT_flag is a flag indicating whether to perform the residual consistency
# test (1)
#
# References:
# Ribes et al. (2013), Allen and Stott (2003), Van Huffel and
# Vandewalle (1991)
# And code from Ribes (in scilab) and Feng (in R)
| import numpy as np
from scipy import linalg, stats
import warnings
if np.shape(x)[0] != np.shape(y)[0]:
raise ValueError('x and y must have same first dimension')
n,m = np.shape(x) #time dimension, number of signals
if np.shape(cn1)[0]!=n:
raise ValueError('dimension mismatch: x and cn1')
p1 = np.shape(cn1)[1]
if np.shape(cn2)[0]!=n:
raise ValueError('dimension mismatch: x and cn2')
p2 = np.shape(cn2)[1]
#if ne is a single value, assume all signals have same ensemble size
# and expand ne to size m | identifier_body |
|
detatt_mk.py | (xL, center_flag=1):
# given the control data xL formed into a nxp matrix, calculates the
# regularized covariance matrix cl_hat
# see Ribes et al. (2009) following Ledoit and Wolf (2004)
#
# gives the option to center the input matrix first (default); would set
# flag to 0 if input matrix has already been multiplied by the projection
# matrix to guarantee full rank of the cov matrix
import numpy as np
if np.size(np.shape(xL))>2:
raise ValueError('input data has too many dimensions')
n,p = np.shape(xL)
#center the input data
if center_flag == 1:
xLc = xL - np.tile(np.mean(xL,axis=0),(n,1))
else:
xLc = xL
#calculate covariance matrix estimate
c_hat = np.dot(xLc,xLc.T)/p
v_hat = np.trace(c_hat)/n
y1 = c_hat - v_hat*np.eye(n)
del_hat2 = np.trace(np.dot(y1.T,y1))/n
y2a = 0
for i in np.arange(p):
w = xLc[:,i]
w = np.expand_dims(w,axis=1)
y2b = np.dot(w,w.T) - c_hat
y2c = np.trace(np.dot(y2b.T, y2b))/n
y2a += y2c
y2 = y2a/(p**2)
beta_hat2 = min(del_hat2,y2)
alpha_hat2 = del_hat2 - beta_hat2
gamma_hat = alpha_hat2/del_hat2
rho_hat = beta_hat2*v_hat/del_hat2
cl_hat = gamma_hat*c_hat + rho_hat*np.eye(n)
return cl_hat
#################################################################################
def reduce_dim(x,y,noise1,noise2,ns=1,ind=None,return_flag=0,order='F'):
#reduces the dimension of the input variables so Cov matrix will be full-rank
#used when the data are centered (because one entry is linear combination
# of others)
#
#ns is the length of the spatial dimension (default is 1)
#assumes order s=1,t=1; s=1,t=2;...; s=2,t=1; s=2,t=2;...
# if grouped by time instead, change order to 'C' (only if no missing data)
#
#if any spatial dimensions are missing time steps, supply ind with list of
#number of time steps for each spatial dimensions
#
#from Ribes code and ECOF code
import numpy as np
from scipy import linalg
n1 = np.size(y) #assumes y is a single column of data (or row, I guess)
if np.mod(float(n1),ns)!= 0:
if ind == None:
raise ValueError('check dimensions or supply indices')
if ind == None:
n = int(n1/ns)
m = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
if ns==1:
xr = np.dot(p,x)
yr = np.dot(p,y)
cn1 = np.dot(p,noise1)
cn2 = np.dot(p,noise2)
else:
yr = np.dot(p,np.reshape(y,(-1,ns),order=order)).flatten(order=order)[:,None]
xr = np.zeros(((n-1)*ns,len(x[0,:])))
for j in range(len(x[0,:])):
xr[:,j] = np.dot(p,np.reshape(x[:,j],(-1,ns),order=order)).flatten(order=order)
cn1 = np.zeros(((n-1)*ns,len(noise1[0,:])))
for j in range(len(noise1[0,:])):
cn1[:,j] = np.dot(p,np.reshape(noise1[:,j],(-1,ns),order=order)).flatten(order=order)
cn2 = np.zeros(((n-1)*ns,len(noise2[0,:])))
for j in range(len(noise2[0,:])):
cn2[:,j] = np.dot(p,np.reshape(noise2[:,j],(-1,ns),order=order)).flatten(order=order)
else:
if len(ind) != ns:
raise ValueError('please provide nt for each spatial entry')
if order == 'C':
raise ValueError('can only handle missing data when grouped by spatial dimension')
if return_flag!=1:
raise ValueError('getting the p matrix probably will not be useful here')
yr = np.zeros((sum(ind)-ns))
xr = np.zeros((sum(ind)-ns,len(x[0,:])))
cn1 = np.zeros((sum(ind)-ns,len(noise1[0,:])))
cn2 = np.zeros((sum(ind)-ns,len(noise2[0,:])))
q = 0;w=0
for i in range(ns):
n = ind[i]
m = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
yr[q:q+n-1,:] = np.dot(p,y[w:w+n])[:,None]
if len(x[0,:])==1:
xr[q:q+n-1,:] = np.dot(p,x[w:w+n])[:,None]
else:
for j in range(len(x[0,:])):
xr[q:q+n-1,j] = np.dot(p,x[w:w+n,j])
for j in range(len(noise1[0,:])):
cn1[q:q+n-1,j] = np.dot(p,noise1[w:w+n,j])
for j in range(len(noise2[0,:])):
cn2[q:q+n-1,j] = np.dot(p,noise2[w:w+n,j])
w=w+n;q=q+n-1
if return_flag==0:
return (xr,yr,cn1,cn2)
else:
return p
#################################################################################
def rct_mc(C, x0, ne, n1, n2, nmc=10000, flag_plotden=0):
# following Ribes et al. (2013) and accompanying code, uses Monte Carlo
# approach for the residual consistency test
#
# input C is covariance matrix, x0 is response matrix, nmc is the number
# of Monte Carlo simulations, n1 and n2 are the dimensions of the
# climate noise matrices (from the control runs, probably), and ne is
# the number of ensemble members that went into each ensemble mean
#
# returns set of nmc estimates of the test statistic for the residual
# consistency test
import numpy as np
from scipy import linalg
n,m = np.shape(x0)
if np.size(np.shape(x0))>2:
raise ValueError('input x0 has too many dimensions')
if np.size(np.shape(C))>2:
raise ValueError('input C has too many dimensions')
if np.shape(C)[0]!=n or np.shape(C)[1]!=n:
raise ValueError('dimensions of C must be consistent with dimensions of x0')
#if ne is a single value, assume all signals have same ensemble size
# and expand ne to size m
if np.size(ne)==1:
ne = np.repeat(float(ne),m)
else:
ne = np.array(ne,dtype='float')
beta0 = np.ones((m,1))
c12 = np.real(linalg.sqrtm(C))
r2s = np.zeros((nmc,1))
for i in np.arange(nmc):
#simulate obs and response
y = np.dot(x0,beta0) + np.dot(c12,np.random.randn(n,1))
x = x0 + np.dot(c12,np.random.randn(n,m))/(np.ones((n,1))*np.sqrt(ne))
x1 = x * (np.ones((n,1))*np.sqrt(ne))
#simulate climate noise and calculate the reg cov matrix
cn1 = np.dot(c12,np.random.randn(n,n1))
cn2 = np.dot(c12,np.random.randn(n,n2))
c1reg = regC(cn1)
c1reg12 = np.real(linalg.inv(linalg.sqrtm(c1reg)))
#prewhiten with the reg cov matrix
xc = np.dot(c1reg12,x1)
yc = np.dot(c1reg12,y)
cn2c = np.dot(c1reg12,cn2)
#calculate test statistic for residual consistency test, using TLS
z = np.hstack((xc,yc))
u,s,v1 = linalg.svd(z)
#v = v1.T
lam = s**2
um = np.matrix(u[:,m]).T
r2s[i] = lam[-1]/ | regC | identifier_name |
|
detatt_mk.py | #
# to use:
#import detatt_mk as da
#
#reduce dimensions
#model response ensemble mean xem2 and obs y2 should be column data (and centered), so use [:,None] if a single forcing
#xcl1 and xcl2 are the climate noise array (n_years x n_realizations) split into two parts
#(xr,yr,cn1,cn2) = da.reduce_dim(xem2[:,None],y2[:,None],xcl1,xcl2)
#
#run d/a; see code for flag options
#z = da.tls(xr,yr,ne=ne,cn1=cn1,cn2=cn2)
#
#
# References:
# Allen, M. R., and S. F. B. Tett, 1999: Checking for model consistency in optimal fingerprinting. Climate Dyn., 15, 419-434, doi:10.1007/s003820050291.
# Allen, M. R., and P. A. Stott, 2003: Estimating signal amplitudes in optimal fingerprinting, part I: Theory. Climate Dyn., 21, 477-491, doi:10.1007/s00382-003-0313-9.
# Ribes, A., S. Planton, and L. Terray, 2013: Application of regularised optimal fingerprinting to attribution. Part I: Method, properties and idealised analysis. Climate Dyn., 41, 2817-2836, doi:10.1007/s00382-013-1735-7.
#################################################################################
def regC(xL, center_flag=1):
# given the control data xL formed into a nxp matrix, calculates the
# regularized covariance matrix cl_hat
# see Ribes et al. (2009) following Ledoit and Wolf (2004)
#
# gives the option to center the input matrix first (default); would set
# flag to 0 if input matrix has already been multiplied by the projection
# matrix to guarantee full rank of the cov matrix
import numpy as np
if np.size(np.shape(xL))>2:
raise ValueError('input data has too many dimensions')
n,p = np.shape(xL)
#center the input data
if center_flag == 1:
xLc = xL - np.tile(np.mean(xL,axis=0),(n,1))
else:
xLc = xL
#calculate covariance matrix estimate
c_hat = np.dot(xLc,xLc.T)/p
v_hat = np.trace(c_hat)/n
y1 = c_hat - v_hat*np.eye(n)
del_hat2 = np.trace(np.dot(y1.T,y1))/n
y2a = 0
for i in np.arange(p):
w = xLc[:,i]
w = np.expand_dims(w,axis=1)
y2b = np.dot(w,w.T) - c_hat
y2c = np.trace(np.dot(y2b.T, y2b))/n
y2a += y2c
y2 = y2a/(p**2)
beta_hat2 = min(del_hat2,y2)
alpha_hat2 = del_hat2 - beta_hat2
gamma_hat = alpha_hat2/del_hat2
rho_hat = beta_hat2*v_hat/del_hat2
cl_hat = gamma_hat*c_hat + rho_hat*np.eye(n)
return cl_hat
#################################################################################
def reduce_dim(x,y,noise1,noise2,ns=1,ind=None,return_flag=0,order='F'):
#reduces the dimension of the input variables so Cov matrix will be full-rank
#used when the data are centered (because one entry is linear combination
# of others)
#
#ns is the length of the spatial dimension (default is 1)
#assumes order s=1,t=1; s=1,t=2;...; s=2,t=1; s=2,t=2;...
# if grouped by time instead, change order to 'C' (only if no missing data)
#
#if any spatial dimensions are missing time steps, supply ind with list of
#number of time steps for each spatial dimensions
#
#from Ribes code and ECOF code
import numpy as np
from scipy import linalg
n1 = np.size(y) #assumes y is a single column of data (or row, I guess)
if np.mod(float(n1),ns)!= 0:
if ind == None:
raise ValueError('check dimensions or supply indices')
if ind == None:
n = int(n1/ns)
m = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
if ns==1:
xr = np.dot(p,x)
yr = np.dot(p,y)
cn1 = np.dot(p,noise1)
cn2 = np.dot(p,noise2)
else:
yr = np.dot(p,np.reshape(y,(-1,ns),order=order)).flatten(order=order)[:,None]
xr = np.zeros(((n-1)*ns,len(x[0,:])))
for j in range(len(x[0,:])):
xr[:,j] = np.dot(p,np.reshape(x[:,j],(-1,ns),order=order)).flatten(order=order)
cn1 = np.zeros(((n-1)*ns,len(noise1[0,:])))
for j in range(len(noise1[0,:])):
cn1[:,j] = np.dot(p,np.reshape(noise1[:,j],(-1,ns),order=order)).flatten(order=order)
cn2 = np.zeros(((n-1)*ns,len(noise2[0,:])))
for j in range(len(noise2[0,:])):
cn2[:,j] = np.dot(p,np.reshape(noise2[:,j],(-1,ns),order=order)).flatten(order=order)
else:
if len(ind) != ns:
raise ValueError('please provide nt for each spatial entry')
if order == 'C':
raise ValueError('can only handle missing data when grouped by spatial dimension')
if return_flag!=1:
raise ValueError('getting the p matrix probably will not be useful here')
yr = np.zeros((sum(ind)-ns))
xr = np.zeros((sum(ind)-ns,len(x[0,:])))
cn1 = np.zeros((sum(ind)-ns,len(noise1[0,:])))
cn2 = np.zeros((sum(ind)-ns,len(noise2[0,:])))
q = 0;w=0
for i in range(ns):
n = ind[i]
m = np.eye(n) - 1./n
u,s,v1 = linalg.svd(m)
p = u[:,:-1].T
yr[q:q+n-1,:] = np.dot(p,y[w:w+n])[:,None]
if len(x[0,:])==1:
xr[q:q+n-1,:] = np.dot(p,x[w:w+n])[:,None]
else:
for j in range(len(x[0,:])):
xr[q:q+n-1,j] = np.dot(p,x[w:w+n,j])
for j in range(len(noise1[0,:])):
cn1[q:q+n-1,j] = np.dot(p,noise1[w:w+n,j])
for j in range(len(noise2[0,:])):
cn2[q:q+n-1,j] = np.dot(p,noise2[w:w+n,j])
w=w+n;q=q+n-1
if return_flag==0:
return (xr,yr,cn1,cn2)
else:
return p
#################################################################################
def rct_mc(C, x0, ne, n1, n2, nmc=10000, flag_plotden=0):
# following Ribes et al. (2013) and accompanying code, uses Monte Carlo
# approach for the residual consistency test
#
# input C is covariance matrix, x0 is response matrix, nmc is the number
# of Monte Carlo simulations, n1 and n2 are the dimensions of the
# climate noise matrices (from the control runs, probably), and ne is
# the number of ensemble members that went into each ensemble mean
#
# returns set of nmc estimates of the test statistic for the residual
# consistency test
import numpy as np
from scipy import linalg
n,m = np.shape(x0)
if np.size(np.shape(x0))>2:
raise ValueError('input x0 has too many dimensions')
if np.size(np.shape(C))>2:
raise ValueError('input C has too many dimensions')
if np.shape(C)[0]!= | # Megan Kirchmeier-Young
#
# Many functions adapted from other code as noted. | random_line_split |
|
dtobuilder.go | .MetricFamily),
}
return b.Build()
}
type builder struct {
Samples map[string]Sample
Exemplars map[string]SeriesExemplar
Metadata map[string]metadata.Metadata
families []*dto.MetricFamily
familyLookup map[string]*dto.MetricFamily
}
// Build converts the dtoBuilder's Samples, Exemplars, and Metadata into a set
// of []*dto.MetricFamily.
func (b *builder) Build() []*dto.MetricFamily {
// *dto.MetricFamily represents a set of samples for a given family of
// metrics. All metrics with the same __name__ belong to the same family.
//
// Each *dto.MetricFamily has a set of *dto.Metric, which contain individual
// samples within that family. The *dto.Metric is where non-__name__ labels
// are kept.
//
// *dto.Metrics can represent counters, gauges, summaries, histograms, and
// untyped values.
//
// In the case of a summary, the *dto.Metric contains multiple samples,
// holding each quantile, the _count, and the _sum. Similarly for histograms,
// the *dto.Metric contains each bucket, the _count, and the _sum.
//
// Because *dto.Metrics for summaries and histograms contain multiple
// samples, Build must roll up individually recorded samples into the
// appropriate *dto.Metric. See buildMetricsFromSamples for more information.
// We *must* do things in the following order:
//
// 1. Populate the families from metadata so we know what fields in
// *dto.Metric to set.
// 2. Populate *dto.Metric values from provided samples.
// 3. Assign exemplars to *dto.Metrics as appropriate.
b.buildFamiliesFromMetadata()
b.buildMetricsFromSamples()
b.injectExemplars()
// Sort all the data before returning.
sortMetricFamilies(b.families)
return b.families
}
// buildFamiliesFromMetadata populates the list of families based on the
// metadata known to the dtoBuilder. familyLookup will be updated for all
// metrics which map to the same family.
//
// In the case of summaries and histograms, multiple metrics map to the same
// family (the bucket/quantile, the _sum, and the _count metrics).
func (b *builder) buildFamiliesFromMetadata() {
for familyName, m := range b.Metadata {
mt := textParseToMetricType(m.Type)
mf := &dto.MetricFamily{
Name: pointer.String(familyName),
Type: &mt,
}
if m.Help != "" {
mf.Help = pointer.String(m.Help)
}
b.families = append(b.families, mf)
// Determine how to populate the lookup table.
switch mt {
case dto.MetricType_SUMMARY:
// Summaries include metrics with the family name (for quantiles),
// followed by _sum and _count suffixes.
b.familyLookup[familyName] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
case dto.MetricType_HISTOGRAM:
// Histograms include metrics for _bucket, _sum, and _count suffixes.
b.familyLookup[familyName+"_bucket"] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
default:
// Everything else matches the family name exactly.
b.familyLookup[familyName] = mf
}
}
}
func textParseToMetricType(tp textparse.MetricType) dto.MetricType {
switch tp {
case textparse.MetricTypeCounter:
return dto.MetricType_COUNTER
case textparse.MetricTypeGauge:
return dto.MetricType_GAUGE
case textparse.MetricTypeHistogram:
return dto.MetricType_HISTOGRAM
case textparse.MetricTypeSummary:
return dto.MetricType_SUMMARY
default:
// There are other values for m.Type, but they're all
// OpenMetrics-specific and we're only converting into the Prometheus
// exposition format.
return dto.MetricType_UNTYPED
}
}
// buildMetricsFromSamples populates *dto.Metrics. If the MetricFamily doesn't
// exist for a given sample, a new one is created.
func (b *builder) buildMetricsFromSamples() {
for _, sample := range b.Samples {
// Get or create the metric family.
metricName := sample.Labels.Get(model.MetricNameLabel)
mf := b.getOrCreateMetricFamily(metricName)
// Retrieve the *dto.Metric based on labels.
m := getOrCreateMetric(mf, sample.Labels)
if sample.PrintTimestamp {
m.TimestampMs = pointer.Int64(sample.Timestamp)
}
switch familyType(mf) {
case dto.MetricType_COUNTER:
m.Counter = &dto.Counter{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_GAUGE:
m.Gauge = &dto.Gauge{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_SUMMARY:
if m.Summary == nil {
m.Summary = &dto.Summary{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Summary.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Summary.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName():
quantile, err := strconv.ParseFloat(sample.Labels.Get(model.QuantileLabel), 64)
if err != nil {
continue
}
m.Summary.Quantile = append(m.Summary.Quantile, &dto.Quantile{
Quantile: &quantile,
Value: pointer.Float64(sample.Value),
})
}
case dto.MetricType_UNTYPED:
m.Untyped = &dto.Untyped{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_HISTOGRAM:
if m.Histogram == nil {
m.Histogram = &dto.Histogram{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Histogram.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Histogram.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName()+"_bucket":
boundary, err := strconv.ParseFloat(sample.Labels.Get(model.BucketLabel), 64)
if err != nil {
continue
}
count := uint64(sample.Value)
m.Histogram.Bucket = append(m.Histogram.Bucket, &dto.Bucket{
UpperBound: &boundary,
CumulativeCount: &count,
})
}
}
}
}
func (b *builder) getOrCreateMetricFamily(familyName string) *dto.MetricFamily {
mf, ok := b.familyLookup[familyName]
if ok {
return mf
}
mt := dto.MetricType_UNTYPED
mf = &dto.MetricFamily{
Name: &familyName,
Type: &mt,
}
b.families = append(b.families, mf)
b.familyLookup[familyName] = mf
return mf
}
func getOrCreateMetric(mf *dto.MetricFamily, l labels.Labels) *dto.Metric {
metricLabels := toLabelPairs(familyType(mf), l)
for _, check := range mf.Metric {
if labelPairsEqual(check.Label, metricLabels) {
return check
}
}
m := &dto.Metric{
Label: metricLabels,
}
mf.Metric = append(mf.Metric, m)
return m
}
// toLabelPairs converts labels.Labels into []*dto.LabelPair. The __name__
// label is always dropped, since the metric name is retrieved from the family
// name instead.
//
// The quantile label is dropped for summaries, and the le label is dropped for
// histograms.
func toLabelPairs(mt dto.MetricType, ls labels.Labels) []*dto.LabelPair {
res := make([]*dto.LabelPair, 0, len(ls))
for _, l := range ls {
if l.Name == model.MetricNameLabel | else if l.Name == model.QuantileLabel && mt == dto.MetricType_SUMMARY {
continue
} else if l.Name == model.BucketLabel && mt == dto.MetricType_HISTOGRAM {
continue
}
res = append(res, &dto.LabelPair{
Name: pointer.String(l.Name),
Value: pointer.String(l.Value),
})
}
sort.Slice(res, func(i, j int) bool {
switch {
case *res[i].Name < *res[j].Name:
return true
case *res[i].Value < *res[j].Value:
return true
default:
return false
}
})
return res
}
func labelPairsEqual(a, b []*dto.LabelPair) bool {
if len(a | {
continue
} | conditional_block |
dtobuilder.go | .MetricFamily),
}
return b.Build()
}
type builder struct {
Samples map[string]Sample
Exemplars map[string]SeriesExemplar
Metadata map[string]metadata.Metadata
families []*dto.MetricFamily
familyLookup map[string]*dto.MetricFamily
}
// Build converts the dtoBuilder's Samples, Exemplars, and Metadata into a set
// of []*dto.MetricFamily.
func (b *builder) Build() []*dto.MetricFamily {
// *dto.MetricFamily represents a set of samples for a given family of
// metrics. All metrics with the same __name__ belong to the same family.
//
// Each *dto.MetricFamily has a set of *dto.Metric, which contain individual
// samples within that family. The *dto.Metric is where non-__name__ labels
// are kept.
//
// *dto.Metrics can represent counters, gauges, summaries, histograms, and
// untyped values.
//
// In the case of a summary, the *dto.Metric contains multiple samples,
// holding each quantile, the _count, and the _sum. Similarly for histograms,
// the *dto.Metric contains each bucket, the _count, and the _sum.
//
// Because *dto.Metrics for summaries and histograms contain multiple
// samples, Build must roll up individually recorded samples into the
// appropriate *dto.Metric. See buildMetricsFromSamples for more information.
// We *must* do things in the following order:
//
// 1. Populate the families from metadata so we know what fields in
// *dto.Metric to set.
// 2. Populate *dto.Metric values from provided samples.
// 3. Assign exemplars to *dto.Metrics as appropriate.
b.buildFamiliesFromMetadata()
b.buildMetricsFromSamples()
b.injectExemplars()
// Sort all the data before returning.
sortMetricFamilies(b.families)
return b.families
}
// buildFamiliesFromMetadata populates the list of families based on the
// metadata known to the dtoBuilder. familyLookup will be updated for all
// metrics which map to the same family.
//
// In the case of summaries and histograms, multiple metrics map to the same
// family (the bucket/quantile, the _sum, and the _count metrics).
func (b *builder) buildFamiliesFromMetadata() {
for familyName, m := range b.Metadata {
mt := textParseToMetricType(m.Type)
mf := &dto.MetricFamily{
Name: pointer.String(familyName),
Type: &mt,
}
if m.Help != "" {
mf.Help = pointer.String(m.Help)
}
b.families = append(b.families, mf)
// Determine how to populate the lookup table.
switch mt {
case dto.MetricType_SUMMARY:
// Summaries include metrics with the family name (for quantiles),
// followed by _sum and _count suffixes.
b.familyLookup[familyName] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
case dto.MetricType_HISTOGRAM:
// Histograms include metrics for _bucket, _sum, and _count suffixes.
b.familyLookup[familyName+"_bucket"] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
default:
// Everything else matches the family name exactly.
b.familyLookup[familyName] = mf
}
}
}
func textParseToMetricType(tp textparse.MetricType) dto.MetricType {
switch tp {
case textparse.MetricTypeCounter:
return dto.MetricType_COUNTER
case textparse.MetricTypeGauge:
return dto.MetricType_GAUGE
case textparse.MetricTypeHistogram:
return dto.MetricType_HISTOGRAM
case textparse.MetricTypeSummary:
return dto.MetricType_SUMMARY
default:
// There are other values for m.Type, but they're all
// OpenMetrics-specific and we're only converting into the Prometheus
// exposition format.
return dto.MetricType_UNTYPED
}
}
// buildMetricsFromSamples populates *dto.Metrics. If the MetricFamily doesn't
// exist for a given sample, a new one is created.
func (b *builder) buildMetricsFromSamples() {
for _, sample := range b.Samples {
// Get or create the metric family.
metricName := sample.Labels.Get(model.MetricNameLabel)
mf := b.getOrCreateMetricFamily(metricName)
// Retrieve the *dto.Metric based on labels.
m := getOrCreateMetric(mf, sample.Labels)
if sample.PrintTimestamp {
m.TimestampMs = pointer.Int64(sample.Timestamp)
}
switch familyType(mf) {
case dto.MetricType_COUNTER:
m.Counter = &dto.Counter{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_GAUGE:
m.Gauge = &dto.Gauge{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_SUMMARY:
if m.Summary == nil {
m.Summary = &dto.Summary{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Summary.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Summary.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName():
quantile, err := strconv.ParseFloat(sample.Labels.Get(model.QuantileLabel), 64)
if err != nil {
continue
}
m.Summary.Quantile = append(m.Summary.Quantile, &dto.Quantile{
Quantile: &quantile,
Value: pointer.Float64(sample.Value),
})
}
case dto.MetricType_UNTYPED:
m.Untyped = &dto.Untyped{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_HISTOGRAM:
if m.Histogram == nil {
m.Histogram = &dto.Histogram{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Histogram.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Histogram.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName()+"_bucket":
boundary, err := strconv.ParseFloat(sample.Labels.Get(model.BucketLabel), 64)
if err != nil {
continue
}
count := uint64(sample.Value)
m.Histogram.Bucket = append(m.Histogram.Bucket, &dto.Bucket{
UpperBound: &boundary,
CumulativeCount: &count,
})
}
}
}
}
func (b *builder) getOrCreateMetricFamily(familyName string) *dto.MetricFamily {
mf, ok := b.familyLookup[familyName]
if ok {
return mf
}
mt := dto.MetricType_UNTYPED
mf = &dto.MetricFamily{
Name: &familyName,
Type: &mt,
}
b.families = append(b.families, mf)
b.familyLookup[familyName] = mf
return mf
}
func getOrCreateMetric(mf *dto.MetricFamily, l labels.Labels) *dto.Metric {
metricLabels := toLabelPairs(familyType(mf), l)
for _, check := range mf.Metric {
if labelPairsEqual(check.Label, metricLabels) {
return check
}
}
m := &dto.Metric{
Label: metricLabels,
}
mf.Metric = append(mf.Metric, m)
return m
}
// toLabelPairs converts labels.Labels into []*dto.LabelPair. The __name__
// label is always dropped, since the metric name is retrieved from the family
// name instead.
//
// The quantile label is dropped for summaries, and the le label is dropped for
// histograms.
func toLabelPairs(mt dto.MetricType, ls labels.Labels) []*dto.LabelPair {
res := make([]*dto.LabelPair, 0, len(ls))
for _, l := range ls {
if l.Name == model.MetricNameLabel {
continue
} else if l.Name == model.QuantileLabel && mt == dto.MetricType_SUMMARY {
continue
} else if l.Name == model.BucketLabel && mt == dto.MetricType_HISTOGRAM {
continue
}
res = append(res, &dto.LabelPair{ | }
sort.Slice(res, func(i, j int) bool {
switch {
case *res[i].Name < *res[j].Name:
return true
case *res[i].Value < *res[j].Value:
return true
default:
return false
}
})
return res
}
func labelPairsEqual(a, b []*dto.LabelPair) bool {
if len(a) | Name: pointer.String(l.Name),
Value: pointer.String(l.Value),
}) | random_line_split |
dtobuilder.go | .MetricFamily),
}
return b.Build()
}
type builder struct {
Samples map[string]Sample
Exemplars map[string]SeriesExemplar
Metadata map[string]metadata.Metadata
families []*dto.MetricFamily
familyLookup map[string]*dto.MetricFamily
}
// Build converts the dtoBuilder's Samples, Exemplars, and Metadata into a set
// of []*dto.MetricFamily.
func (b *builder) Build() []*dto.MetricFamily {
// *dto.MetricFamily represents a set of samples for a given family of
// metrics. All metrics with the same __name__ belong to the same family.
//
// Each *dto.MetricFamily has a set of *dto.Metric, which contain individual
// samples within that family. The *dto.Metric is where non-__name__ labels
// are kept.
//
// *dto.Metrics can represent counters, gauges, summaries, histograms, and
// untyped values.
//
// In the case of a summary, the *dto.Metric contains multiple samples,
// holding each quantile, the _count, and the _sum. Similarly for histograms,
// the *dto.Metric contains each bucket, the _count, and the _sum.
//
// Because *dto.Metrics for summaries and histograms contain multiple
// samples, Build must roll up individually recorded samples into the
// appropriate *dto.Metric. See buildMetricsFromSamples for more information.
// We *must* do things in the following order:
//
// 1. Populate the families from metadata so we know what fields in
// *dto.Metric to set.
// 2. Populate *dto.Metric values from provided samples.
// 3. Assign exemplars to *dto.Metrics as appropriate.
b.buildFamiliesFromMetadata()
b.buildMetricsFromSamples()
b.injectExemplars()
// Sort all the data before returning.
sortMetricFamilies(b.families)
return b.families
}
// buildFamiliesFromMetadata populates the list of families based on the
// metadata known to the dtoBuilder. familyLookup will be updated for all
// metrics which map to the same family.
//
// In the case of summaries and histograms, multiple metrics map to the same
// family (the bucket/quantile, the _sum, and the _count metrics).
func (b *builder) buildFamiliesFromMetadata() {
for familyName, m := range b.Metadata {
mt := textParseToMetricType(m.Type)
mf := &dto.MetricFamily{
Name: pointer.String(familyName),
Type: &mt,
}
if m.Help != "" {
mf.Help = pointer.String(m.Help)
}
b.families = append(b.families, mf)
// Determine how to populate the lookup table.
switch mt {
case dto.MetricType_SUMMARY:
// Summaries include metrics with the family name (for quantiles),
// followed by _sum and _count suffixes.
b.familyLookup[familyName] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
case dto.MetricType_HISTOGRAM:
// Histograms include metrics for _bucket, _sum, and _count suffixes.
b.familyLookup[familyName+"_bucket"] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
default:
// Everything else matches the family name exactly.
b.familyLookup[familyName] = mf
}
}
}
func textParseToMetricType(tp textparse.MetricType) dto.MetricType {
switch tp {
case textparse.MetricTypeCounter:
return dto.MetricType_COUNTER
case textparse.MetricTypeGauge:
return dto.MetricType_GAUGE
case textparse.MetricTypeHistogram:
return dto.MetricType_HISTOGRAM
case textparse.MetricTypeSummary:
return dto.MetricType_SUMMARY
default:
// There are other values for m.Type, but they're all
// OpenMetrics-specific and we're only converting into the Prometheus
// exposition format.
return dto.MetricType_UNTYPED
}
}
// buildMetricsFromSamples populates *dto.Metrics. If the MetricFamily doesn't
// exist for a given sample, a new one is created.
func (b *builder) buildMetricsFromSamples() {
for _, sample := range b.Samples {
// Get or create the metric family.
metricName := sample.Labels.Get(model.MetricNameLabel)
mf := b.getOrCreateMetricFamily(metricName)
// Retrieve the *dto.Metric based on labels.
m := getOrCreateMetric(mf, sample.Labels)
if sample.PrintTimestamp {
m.TimestampMs = pointer.Int64(sample.Timestamp)
}
switch familyType(mf) {
case dto.MetricType_COUNTER:
m.Counter = &dto.Counter{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_GAUGE:
m.Gauge = &dto.Gauge{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_SUMMARY:
if m.Summary == nil {
m.Summary = &dto.Summary{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Summary.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Summary.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName():
quantile, err := strconv.ParseFloat(sample.Labels.Get(model.QuantileLabel), 64)
if err != nil {
continue
}
m.Summary.Quantile = append(m.Summary.Quantile, &dto.Quantile{
Quantile: &quantile,
Value: pointer.Float64(sample.Value),
})
}
case dto.MetricType_UNTYPED:
m.Untyped = &dto.Untyped{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_HISTOGRAM:
if m.Histogram == nil {
m.Histogram = &dto.Histogram{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Histogram.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Histogram.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName()+"_bucket":
boundary, err := strconv.ParseFloat(sample.Labels.Get(model.BucketLabel), 64)
if err != nil {
continue
}
count := uint64(sample.Value)
m.Histogram.Bucket = append(m.Histogram.Bucket, &dto.Bucket{
UpperBound: &boundary,
CumulativeCount: &count,
})
}
}
}
}
func (b *builder) getOrCreateMetricFamily(familyName string) *dto.MetricFamily {
mf, ok := b.familyLookup[familyName]
if ok {
return mf
}
mt := dto.MetricType_UNTYPED
mf = &dto.MetricFamily{
Name: &familyName,
Type: &mt,
}
b.families = append(b.families, mf)
b.familyLookup[familyName] = mf
return mf
}
func getOrCreateMetric(mf *dto.MetricFamily, l labels.Labels) *dto.Metric {
metricLabels := toLabelPairs(familyType(mf), l)
for _, check := range mf.Metric {
if labelPairsEqual(check.Label, metricLabels) {
return check
}
}
m := &dto.Metric{
Label: metricLabels,
}
mf.Metric = append(mf.Metric, m)
return m
}
// toLabelPairs converts labels.Labels into []*dto.LabelPair. The __name__
// label is always dropped, since the metric name is retrieved from the family
// name instead.
//
// The quantile label is dropped for summaries, and the le label is dropped for
// histograms.
func toLabelPairs(mt dto.MetricType, ls labels.Labels) []*dto.LabelPair {
res := make([]*dto.LabelPair, 0, len(ls))
for _, l := range ls {
if l.Name == model.MetricNameLabel {
continue
} else if l.Name == model.QuantileLabel && mt == dto.MetricType_SUMMARY {
continue
} else if l.Name == model.BucketLabel && mt == dto.MetricType_HISTOGRAM {
continue
}
res = append(res, &dto.LabelPair{
Name: pointer.String(l.Name),
Value: pointer.String(l.Value),
})
}
sort.Slice(res, func(i, j int) bool {
switch {
case *res[i].Name < *res[j].Name:
return true
case *res[i].Value < *res[j].Value:
return true
default:
return false
}
})
return res
}
func | (a, b []*dto.LabelPair) bool {
if len(a | labelPairsEqual | identifier_name |
dtobuilder.go | MARY:
// Summaries include metrics with the family name (for quantiles),
// followed by _sum and _count suffixes.
b.familyLookup[familyName] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
case dto.MetricType_HISTOGRAM:
// Histograms include metrics for _bucket, _sum, and _count suffixes.
b.familyLookup[familyName+"_bucket"] = mf
b.familyLookup[familyName+"_sum"] = mf
b.familyLookup[familyName+"_count"] = mf
default:
// Everything else matches the family name exactly.
b.familyLookup[familyName] = mf
}
}
}
func textParseToMetricType(tp textparse.MetricType) dto.MetricType {
switch tp {
case textparse.MetricTypeCounter:
return dto.MetricType_COUNTER
case textparse.MetricTypeGauge:
return dto.MetricType_GAUGE
case textparse.MetricTypeHistogram:
return dto.MetricType_HISTOGRAM
case textparse.MetricTypeSummary:
return dto.MetricType_SUMMARY
default:
// There are other values for m.Type, but they're all
// OpenMetrics-specific and we're only converting into the Prometheus
// exposition format.
return dto.MetricType_UNTYPED
}
}
// buildMetricsFromSamples populates *dto.Metrics. If the MetricFamily doesn't
// exist for a given sample, a new one is created.
func (b *builder) buildMetricsFromSamples() {
for _, sample := range b.Samples {
// Get or create the metric family.
metricName := sample.Labels.Get(model.MetricNameLabel)
mf := b.getOrCreateMetricFamily(metricName)
// Retrieve the *dto.Metric based on labels.
m := getOrCreateMetric(mf, sample.Labels)
if sample.PrintTimestamp {
m.TimestampMs = pointer.Int64(sample.Timestamp)
}
switch familyType(mf) {
case dto.MetricType_COUNTER:
m.Counter = &dto.Counter{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_GAUGE:
m.Gauge = &dto.Gauge{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_SUMMARY:
if m.Summary == nil {
m.Summary = &dto.Summary{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Summary.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Summary.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName():
quantile, err := strconv.ParseFloat(sample.Labels.Get(model.QuantileLabel), 64)
if err != nil {
continue
}
m.Summary.Quantile = append(m.Summary.Quantile, &dto.Quantile{
Quantile: &quantile,
Value: pointer.Float64(sample.Value),
})
}
case dto.MetricType_UNTYPED:
m.Untyped = &dto.Untyped{
Value: pointer.Float64(sample.Value),
}
case dto.MetricType_HISTOGRAM:
if m.Histogram == nil {
m.Histogram = &dto.Histogram{}
}
switch {
case metricName == mf.GetName()+"_count":
val := uint64(sample.Value)
m.Histogram.SampleCount = &val
case metricName == mf.GetName()+"_sum":
m.Histogram.SampleSum = pointer.Float64(sample.Value)
case metricName == mf.GetName()+"_bucket":
boundary, err := strconv.ParseFloat(sample.Labels.Get(model.BucketLabel), 64)
if err != nil {
continue
}
count := uint64(sample.Value)
m.Histogram.Bucket = append(m.Histogram.Bucket, &dto.Bucket{
UpperBound: &boundary,
CumulativeCount: &count,
})
}
}
}
}
func (b *builder) getOrCreateMetricFamily(familyName string) *dto.MetricFamily {
mf, ok := b.familyLookup[familyName]
if ok {
return mf
}
mt := dto.MetricType_UNTYPED
mf = &dto.MetricFamily{
Name: &familyName,
Type: &mt,
}
b.families = append(b.families, mf)
b.familyLookup[familyName] = mf
return mf
}
func getOrCreateMetric(mf *dto.MetricFamily, l labels.Labels) *dto.Metric {
metricLabels := toLabelPairs(familyType(mf), l)
for _, check := range mf.Metric {
if labelPairsEqual(check.Label, metricLabels) {
return check
}
}
m := &dto.Metric{
Label: metricLabels,
}
mf.Metric = append(mf.Metric, m)
return m
}
// toLabelPairs converts labels.Labels into []*dto.LabelPair. The __name__
// label is always dropped, since the metric name is retrieved from the family
// name instead.
//
// The quantile label is dropped for summaries, and the le label is dropped for
// histograms.
func toLabelPairs(mt dto.MetricType, ls labels.Labels) []*dto.LabelPair {
res := make([]*dto.LabelPair, 0, len(ls))
for _, l := range ls {
if l.Name == model.MetricNameLabel {
continue
} else if l.Name == model.QuantileLabel && mt == dto.MetricType_SUMMARY {
continue
} else if l.Name == model.BucketLabel && mt == dto.MetricType_HISTOGRAM {
continue
}
res = append(res, &dto.LabelPair{
Name: pointer.String(l.Name),
Value: pointer.String(l.Value),
})
}
sort.Slice(res, func(i, j int) bool {
switch {
case *res[i].Name < *res[j].Name:
return true
case *res[i].Value < *res[j].Value:
return true
default:
return false
}
})
return res
}
func labelPairsEqual(a, b []*dto.LabelPair) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if *a[i].Name != *b[i].Name || *a[i].Value != *b[i].Value {
return false
}
}
return true
}
func familyType(mf *dto.MetricFamily) dto.MetricType {
ty := mf.Type
if ty == nil {
return dto.MetricType_UNTYPED
}
return *ty
}
// injectExemplars populates the exemplars in the various *dto.Metric
// instances. Exemplars are ignored if the parent *dto.MetricFamily doesn't
// support exeplars based on metric type.
func (b *builder) injectExemplars() {
for _, e := range b.Exemplars {
// Get or create the metric family.
exemplarName := e.Labels.Get(model.MetricNameLabel)
mf, ok := b.familyLookup[exemplarName]
if !ok {
// No metric family, which means no corresponding sample; ignore.
continue
}
m := getMetric(mf, e.Labels)
if m == nil {
continue
}
// Only counters and histograms support exemplars.
switch familyType(mf) {
case dto.MetricType_COUNTER:
if m.Counter == nil {
// Sample never added; ignore.
continue
}
m.Counter.Exemplar = convertExemplar(dto.MetricType_COUNTER, e.Exemplar)
case dto.MetricType_HISTOGRAM:
if m.Histogram == nil {
// Sample never added; ignore.
continue
}
switch {
case exemplarName == mf.GetName()+"_bucket":
boundary, err := strconv.ParseFloat(e.Labels.Get(model.BucketLabel), 64)
if err != nil {
continue
}
bucket := findBucket(m.Histogram, boundary)
if bucket == nil {
continue
}
bucket.Exemplar = convertExemplar(dto.MetricType_HISTOGRAM, e.Exemplar)
}
}
}
}
func getMetric(mf *dto.MetricFamily, l labels.Labels) *dto.Metric {
metricLabels := toLabelPairs(familyType(mf), l)
for _, check := range mf.Metric {
if labelPairsEqual(check.Label, metricLabels) {
return check
}
}
return nil
}
func convertExemplar(mt dto.MetricType, e exemplar.Exemplar) *dto.Exemplar | {
res := &dto.Exemplar{
Label: toLabelPairs(mt, e.Labels),
Value: &e.Value,
}
if e.HasTs {
res.Timestamp = timestamppb.New(time.UnixMilli(e.Ts))
}
return res
} | identifier_body |
|
jti-grpc-client-python.py | # As per the .proto file, use 0xFFFFFFFF for all subscription identifiers.
if INVOKE_GET_SUBSCRIPTIONS_FLAG:
logger.info("Invoking 'getTelemetrySubscriptions()' ...")
get_subscriptions_request = agent_pb2.GetSubscriptionsRequest(subscription_id = 0xFFFFFFFF)
get_subscriptions_reply = stub.getTelemetrySubscriptions(get_subscriptions_request)
logger.info(str(get_subscriptions_reply))
logger.info("... Done!\n")
# STEP 7: The telemetrySubscribe() method requires a SubscriptionRequest object as an input, which in turn requires
# a SubscriptionInput object and a list of Path objects as input ... assemble these various objects.
# Setup Collector ...
collector = agent_pb2.Collector(address=COLLECTOR_ADDRESS, port=COLLECTOR_PORT)
logger.debug("Value of 'collector': " + str(collector))
# Use Collector to setup SubscriptionInput ...
subscription_input = agent_pb2.SubscriptionInput(collector_list=[collector])
logger.debug("Value of 'subscription_input':\n" + str(subscription_input))
# Setup Path ...
#path = agent_pb2.Path(path="/junos/system/linecard/interface/", sample_frequency=5000)
#path = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/", sample_frequency=5000)
#path = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/state/", sample_frequency=5000)
#path = agent_pb2.Path(path="/junos/events", sample_frequency=0)
#path = agent_pb2.Path(path="/junos/events/event[id=\'UI_COMMIT\']", sample_frequency=0)
#path = agent_pb2.Path(path="/components/", sample_frequency=5000)
## Multiple Sensor Subscriptions ...
path1 = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/state/", sample_frequency=5000)
path2 = agent_pb2.Path(path="/junos/events/event[id=\'UI_COMMIT\']", sample_frequency=0)
#path2 = agent_pb2.Path(path="/junos/events", sample_frequency=0)
# Use Path(s) to setup path_list ...
#path_list = [path]
path_list = [path1, path2]
logger.debug("Value of 'path_list':\n" + str(path_list))
# Use SubscriptionInput and path_list to setup SubscriptionRequest ...
subscription_request = agent_pb2.SubscriptionRequest(input=subscription_input,
path_list=path_list)
logger.info("Value of 'subscription_request':\n" + str(subscription_request))
# Define Kafka Endpoint
producer = None
if SOUTHBOUND_KAFKA_FLAG:
bootstrap_server = KAFKA_IP + ":" + KAFKA_PORT
logger.info("Value of 'bootstrap_server':" + bootstrap_server)
# Connect to Kafka as a Producer
logger.info("Connecting to Kafka as a Producer ...")
producer = KafkaProducer(bootstrap_servers=bootstrap_server)
logger.info("... Done!\n")
# Launch telemetry subscription request ...
for message in stub.telemetrySubscribe(subscription_request):
# Print each telemetry message to console.
print(message)
# Parse message and assemble contents in JSON format in preparation for Kafka push.
data = {}
data['system_id'] = message.system_id
data['component_id'] = message.component_id
data['sub_component_id'] = message.sub_component_id
data['path'] = message.path
data['sequence_number'] = message.sequence_number
data['timestamp'] = message.timestamp
# The telemetry data returned is a list of key-value pairs, where the value can be one of the following
# possible values: double_value, int_value, uint_value, sint_value, bool_value, str_value, bytes_value.
kv_pairs = []
for kv in message.kv:
key = kv.key
value_type = kv.WhichOneof('value')
if value_type == "double_value":
kv_pairs.append({
key: kv.double_value
})
if value_type == "int_value":
kv_pairs.append({
key: kv.int_value
})
if value_type == "uint_value":
kv_pairs.append({
key: kv.uint_value
})
if value_type == "sint_value":
kv_pairs.append({
key: kv.sint_value
})
if value_type == "bool_value":
kv_pairs.append({
key: kv.bool_value
})
if value_type == "str_value":
kv_pairs.append({
key: kv.str_value
})
if value_type == "bytes_value":
kv_pairs.append({
key: kv.bytes_value
})
data['kv_pairs'] = kv_pairs
#data['key'] = 'value'
# Encode the data in JSON and pretty-print it before firing it off to Kafka.
json_data = json.dumps(data, indent=3)
if SOUTHBOUND_KAFKA_FLAG:
# Publish message to Kafka bus.
# Route to an appropriate Kafka topic, based on the telemetry subscription path.
logger.info("Pushing message to Kafka ...")
if JTI_INTERFACES_SENSOR_SUBSTRING in message.path:
#producer.send(KAFKA_TOPIC_JUNIPER, json_data)
producer.send(KAFKA_TOPIC_JUNIPER_INTERFACES, json_data)
#producer.send('juniper', json_data)
elif JTI_SYSLOG_SENSOR_SUBSTRING in message.path:
#producer.send(KAFKA_TOPIC_JUNIPER, json_data)
producer.send(KAFKA_TOPIC_JUNIPER_SYSLOG, json_data)
#producer.send("juniper", json_data)
else:
producer.send(KAFKA_TOPIC_JUNIPER, json_data)
# Block until all async messages are sent.
# Failing to do so may result in the Producer being killed before messages are actually delivered!
# Note that send() operates asynchronously!
producer.flush()
logger.info("... Done!\n")
# TODO - WORK IN PROGRESS ...
# ----------[ FUNCTION 'parseArguments()' ]----------
def parseArguments():
logger.info("FUNCTION 'parseArguments(): BEGIN")
# Note: Because of the number and complexity of the arguments, we will not use single character option letters
# (eg. -o).
# Instead, to promote clarity, we will enforce the use of long options (eg. --arg1).
# Create an 'ArgumentParser' object, which will hold all the info necessary to parse the CLI options.
argumentParser = argparse.ArgumentParser(description=ARGPARSER)
# Because we need to support multiple operations (ADD, DELETE, MODIFY) in one script, different operation types will
# require different arguments. We use ArgParse's 'subparser' functionality here.
# Split the functionality into sub-commands: ADD, DELETE, MODIFY and associated specific arguments with sub-command.
argumentSubParsers = argumentParser.add_subparsers(help="Commands")
opType_Add_Parser = argumentSubParsers.add_parser("ADD")
opType_Delete_Parser = argumentSubParsers.add_parser("DELETE")
opType_Modify_Parser = argumentSubParsers.add_parser("MODIFY")
# Add the individual argements for ADD ...
opType_Add_Parser.add_argument('--arg1', required=True, help="Argument 1")
# Add the individual argements for DELETE ...
# The API allows us to decommission a service based on Service ID, Service Order Name, or External ID.
# We will implement support for deletion based on Service Name and External ID, however these must be
# mutually exclusive. We use Argparse's 'add_mutually_exclusive_group()' to implement this. We also set
# 'required=True' on the mutually exclusive group to force the user to enter either Service Order Name or
# External ID.
mutuallyExclusiveGroup_Delete = opType_Delete_Parser.add_mutually_exclusive_group(required=True)
mutuallyExclusiveGroup_Delete.add_argument('--serviceName', help="Service Name")
mutuallyExclusiveGroup_Delete.add_argument('--externalId', help="External ID")
# Explicitly refer to the global variable to avoid creating a local variable that shadows the outer scope.
#global args
# Convert argument strings to objects and assign them as attributes of a namespace.
# Return the populated namespace.
# For example, if the script was invoked with "python 219-main.py --arg1 'test1' --arg2=test2 --arg3 test3",
# then 'argumentParser.parse_args()' will return "Namespace(arg1='test1', arg2='test2', arg3='test3')"
#
# For DELETE, because we allow either the '--externalId' or '--serviceName' options to be set, we need to check
# which one was set by looking into 'args'. For example, if the script was invoked with a value of "extId1" for
# '--externalId' and nothing for '--serviceName', then the value of "args" will look like this:
# Namespace(externalId='extId1', serviceName=None)
# So, when checking if an optional argument was set or not, we need to test whether it's value is None.
arguments = argumentParser.parse_args()
logger.debug("Value of 'args': " + str(arguments))
logger.info("FUNCTION 'parseArguments(): END")
return arguments
# ----------[ FUNCTION 'setupLogging()' ]----------
def | setupLogging | identifier_name |
|
jti-grpc-client-python.py | Multiple Sensor Subscriptions ...
path1 = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/state/", sample_frequency=5000)
path2 = agent_pb2.Path(path="/junos/events/event[id=\'UI_COMMIT\']", sample_frequency=0)
#path2 = agent_pb2.Path(path="/junos/events", sample_frequency=0)
# Use Path(s) to setup path_list ...
#path_list = [path]
path_list = [path1, path2]
logger.debug("Value of 'path_list':\n" + str(path_list))
# Use SubscriptionInput and path_list to setup SubscriptionRequest ...
subscription_request = agent_pb2.SubscriptionRequest(input=subscription_input,
path_list=path_list)
logger.info("Value of 'subscription_request':\n" + str(subscription_request))
# Define Kafka Endpoint
producer = None
if SOUTHBOUND_KAFKA_FLAG:
bootstrap_server = KAFKA_IP + ":" + KAFKA_PORT
logger.info("Value of 'bootstrap_server':" + bootstrap_server)
# Connect to Kafka as a Producer
logger.info("Connecting to Kafka as a Producer ...")
producer = KafkaProducer(bootstrap_servers=bootstrap_server)
logger.info("... Done!\n")
# Launch telemetry subscription request ...
for message in stub.telemetrySubscribe(subscription_request):
# Print each telemetry message to console.
print(message)
# Parse message and assemble contents in JSON format in preparation for Kafka push.
data = {}
data['system_id'] = message.system_id
data['component_id'] = message.component_id
data['sub_component_id'] = message.sub_component_id
data['path'] = message.path
data['sequence_number'] = message.sequence_number
data['timestamp'] = message.timestamp
# The telemetry data returned is a list of key-value pairs, where the value can be one of the following
# possible values: double_value, int_value, uint_value, sint_value, bool_value, str_value, bytes_value.
kv_pairs = []
for kv in message.kv:
key = kv.key
value_type = kv.WhichOneof('value')
if value_type == "double_value":
kv_pairs.append({
key: kv.double_value
})
if value_type == "int_value":
kv_pairs.append({
key: kv.int_value
})
if value_type == "uint_value":
kv_pairs.append({
key: kv.uint_value
})
if value_type == "sint_value":
kv_pairs.append({
key: kv.sint_value
})
if value_type == "bool_value":
kv_pairs.append({
key: kv.bool_value
})
if value_type == "str_value":
kv_pairs.append({
key: kv.str_value
})
if value_type == "bytes_value":
kv_pairs.append({
key: kv.bytes_value
})
data['kv_pairs'] = kv_pairs
#data['key'] = 'value'
# Encode the data in JSON and pretty-print it before firing it off to Kafka.
json_data = json.dumps(data, indent=3)
if SOUTHBOUND_KAFKA_FLAG:
# Publish message to Kafka bus.
# Route to an appropriate Kafka topic, based on the telemetry subscription path.
logger.info("Pushing message to Kafka ...")
if JTI_INTERFACES_SENSOR_SUBSTRING in message.path:
#producer.send(KAFKA_TOPIC_JUNIPER, json_data)
producer.send(KAFKA_TOPIC_JUNIPER_INTERFACES, json_data)
#producer.send('juniper', json_data)
elif JTI_SYSLOG_SENSOR_SUBSTRING in message.path:
#producer.send(KAFKA_TOPIC_JUNIPER, json_data)
producer.send(KAFKA_TOPIC_JUNIPER_SYSLOG, json_data)
#producer.send("juniper", json_data)
else:
producer.send(KAFKA_TOPIC_JUNIPER, json_data)
# Block until all async messages are sent.
# Failing to do so may result in the Producer being killed before messages are actually delivered!
# Note that send() operates asynchronously!
producer.flush()
logger.info("... Done!\n")
# TODO - WORK IN PROGRESS ...
# ----------[ FUNCTION 'parseArguments()' ]----------
def parseArguments():
logger.info("FUNCTION 'parseArguments(): BEGIN")
# Note: Because of the number and complexity of the arguments, we will not use single character option letters
# (eg. -o).
# Instead, to promote clarity, we will enforce the use of long options (eg. --arg1).
# Create an 'ArgumentParser' object, which will hold all the info necessary to parse the CLI options.
argumentParser = argparse.ArgumentParser(description=ARGPARSER)
# Because we need to support multiple operations (ADD, DELETE, MODIFY) in one script, different operation types will
# require different arguments. We use ArgParse's 'subparser' functionality here.
# Split the functionality into sub-commands: ADD, DELETE, MODIFY and associated specific arguments with sub-command.
argumentSubParsers = argumentParser.add_subparsers(help="Commands")
opType_Add_Parser = argumentSubParsers.add_parser("ADD")
opType_Delete_Parser = argumentSubParsers.add_parser("DELETE")
opType_Modify_Parser = argumentSubParsers.add_parser("MODIFY")
# Add the individual argements for ADD ...
opType_Add_Parser.add_argument('--arg1', required=True, help="Argument 1")
# Add the individual argements for DELETE ...
# The API allows us to decommission a service based on Service ID, Service Order Name, or External ID.
# We will implement support for deletion based on Service Name and External ID, however these must be
# mutually exclusive. We use Argparse's 'add_mutually_exclusive_group()' to implement this. We also set
# 'required=True' on the mutually exclusive group to force the user to enter either Service Order Name or
# External ID.
mutuallyExclusiveGroup_Delete = opType_Delete_Parser.add_mutually_exclusive_group(required=True)
mutuallyExclusiveGroup_Delete.add_argument('--serviceName', help="Service Name")
mutuallyExclusiveGroup_Delete.add_argument('--externalId', help="External ID")
# Explicitly refer to the global variable to avoid creating a local variable that shadows the outer scope.
#global args
# Convert argument strings to objects and assign them as attributes of a namespace.
# Return the populated namespace.
# For example, if the script was invoked with "python 219-main.py --arg1 'test1' --arg2=test2 --arg3 test3",
# then 'argumentParser.parse_args()' will return "Namespace(arg1='test1', arg2='test2', arg3='test3')"
#
# For DELETE, because we allow either the '--externalId' or '--serviceName' options to be set, we need to check
# which one was set by looking into 'args'. For example, if the script was invoked with a value of "extId1" for
# '--externalId' and nothing for '--serviceName', then the value of "args" will look like this:
# Namespace(externalId='extId1', serviceName=None)
# So, when checking if an optional argument was set or not, we need to test whether it's value is None.
arguments = argumentParser.parse_args()
logger.debug("Value of 'args': " + str(arguments))
logger.info("FUNCTION 'parseArguments(): END")
return arguments
# ----------[ FUNCTION 'setupLogging()' ]----------
def setupLogging():
# Explicitly refer to the global variable to avoid creating a local variable that shadows the outer scope.
global logger
# Create logger.
logger = logging.getLogger(__name__)
# Create a rotating log file handler based on how much time has elapsed.
# In this case, rotate the log file every day, to a maximum of 5 days.
logFileHandler = TimedRotatingFileHandler(LOGFILE,
when="d",
interval=1,
backupCount=5)
# Set logging level to DEBUG or INFO
# Note: Need to explicitly set 'logger.setLevel()' or we just get an empty file.
logger.setLevel(logging.DEBUG)
logFileHandler.setLevel(logging.DEBUG)
# Create logging format and add it to the log file handler.
logFormatter = logging.Formatter('[%(asctime)s][%(filename)s][Func %(funcName)s()][Line %(lineno)d][%(levelname)s]: %(message)s')
logFileHandler.setFormatter(logFormatter)
# Add the log file handler to the logger.
logger.addHandler(logFileHandler)
return
# ----------[ ]----------
# Have the code only execute when the module is run directly as a program, and not have it
# execute when someone wants to import the module and invoke the functions themselves.
if __name__ == "__main__":
# Note: 'sys.argv' is a list of strings representing the command-line arguments.
# 'sys.argv[0]' always represents the script name. So we are really only interested in the arguments after the
# script name.
# We can retrieve the arguments after the script name as follows: 'sys.argv[1:]'
# Even though we are using 'argparse' to parse the argument list, we should still log the contents of 'sys.argv[0]' | # for debugging purposes.
main(sys.argv[1:]) | random_line_split |
|
jti-grpc-client-python.py | LECTOR_PORT = 50051
# MX2 - OpenConfig 0.0.0.10 - gRPC
#DEVICE_IP = "10.49.126.157"
# MX3 - OpenConfig 0.0.0.9 - Native & Syslog
DEVICE_IP = "10.49.114.76"
# Paul Abbott's Router
#DEVICE_IP = "10.49.123.198"
DEVICE_PASSWORD = ""
DEVICE_PORT = "50051"
DEVICE_USERNAME = ""
INVOKE_GET_OPERATIONAL_STATE_FLAG = False
INVOKE_GET_DATA_ENCODINGS_FLAG = False
INVOKE_GET_SUBSCRIPTIONS_FLAG = False
JTI_INTERFACES_SENSOR_SUBSTRING = "/interfaces/interface"
JTI_SYSLOG_SENSOR_SUBSTRING = "/junos/events"
KAFKA_IP = "10.49.114.238"
KAFKA_PORT = "9092"
KAFKA_TOPIC_JUNIPER = "juniper"
KAFKA_TOPIC_JUNIPER_INTERFACES = "juniper_interfaces"
KAFKA_TOPIC_JUNIPER_SYSLOG = "juniper_syslog"
LOGFILE = "log/jti_grpc_client.log"
SOUTHBOUND_KAFKA_FLAG = False
def enum(**named_values):
return type('Enum', (), named_values)
VERBOSITY = enum(BRIEF='BRIEF', DETAIL='DETAIL', TERSE='TERSE')
# ----------[ GLOBAL VARIABLES ]----------
args = None
logger = None
# ----------[ FUNCTION 'main()' ]----------
def main(argv):
# Explicitly refer to the global variables to avoid creating a local variable that shadows the outer scope.
global args
# STEP 1: Setup the logging so we can start to log debug info right away.
setupLogging()
logger.debug("\n\n\n--------------------[ JTI-GRPC-CLIENT: BEGIN EXECUTION ]--------------------")
# STEP 2: Parse the argument list with which this script was invoked and set global variable 'args'.
logger.debug("Number of arguments used in script invocation: " + str(len(argv)) + "\n")
logger.info("Full argument list: " + str(argv) + "\n")
#args = parseArguments()
# STEP 3: Create an insecure channel to the gRPC server running on the router, and use the channel to create
# the client stub
device_ip_and_port = DEVICE_IP + ":" + DEVICE_PORT
logger.info("Creating insecure channel to " + device_ip_and_port + " ...")
grpc_channel = grpc.insecure_channel(device_ip_and_port)
stub = agent_pb2_grpc.OpenConfigTelemetryStub(grpc_channel)
logger.info("... Done!\n")
# STEP 4: Get the Telemetry Agent operational states (this is one of the methods exposed via agent.proto).
# As per the .proto file, use 0xFFFFFFFF for all subscription identifiers including agent-level operational stats.
if INVOKE_GET_OPERATIONAL_STATE_FLAG:
logger.info("Invoking 'getTelemetryOperationalState()' ...")
get_oper_state_request = agent_pb2.GetOperationalStateRequest(subscription_id = 0xFFFFFFFF,
verbosity = agent_pb2.VerbosityLevel.Value(VERBOSITY.BRIEF))
get_oper_state_response = stub.getTelemetryOperationalState(get_oper_state_request)
logger.info(str(get_oper_state_response))
logger.info("... Done!\n")
# STEP 5: Return the set of data encodings supported by the device for telemetry data.
if INVOKE_GET_DATA_ENCODINGS_FLAG:
logger.info("Invoking 'getDataEncodings()' ...")
get_data_encoding_request = agent_pb2.DataEncodingRequest()
get_data_encoding_reply = stub.getDataEncodings(get_data_encoding_request)
logger.info(str(get_data_encoding_reply))
logger.info("... Done!\n")
# STEP 6: Get the list of current telemetry subscriptions from the target (this is one of the methods exposed via agent.proto).
# As per the .proto file, use 0xFFFFFFFF for all subscription identifiers.
if INVOKE_GET_SUBSCRIPTIONS_FLAG:
logger.info("Invoking 'getTelemetrySubscriptions()' ...")
get_subscriptions_request = agent_pb2.GetSubscriptionsRequest(subscription_id = 0xFFFFFFFF)
get_subscriptions_reply = stub.getTelemetrySubscriptions(get_subscriptions_request)
logger.info(str(get_subscriptions_reply))
logger.info("... Done!\n")
# STEP 7: The telemetrySubscribe() method requires a SubscriptionRequest object as an input, which in turn requires
# a SubscriptionInput object and a list of Path objects as input ... assemble these various objects.
# Setup Collector ...
collector = agent_pb2.Collector(address=COLLECTOR_ADDRESS, port=COLLECTOR_PORT)
logger.debug("Value of 'collector': " + str(collector))
# Use Collector to setup SubscriptionInput ...
subscription_input = agent_pb2.SubscriptionInput(collector_list=[collector])
logger.debug("Value of 'subscription_input':\n" + str(subscription_input))
# Setup Path ...
#path = agent_pb2.Path(path="/junos/system/linecard/interface/", sample_frequency=5000)
#path = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/", sample_frequency=5000)
#path = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/state/", sample_frequency=5000)
#path = agent_pb2.Path(path="/junos/events", sample_frequency=0)
#path = agent_pb2.Path(path="/junos/events/event[id=\'UI_COMMIT\']", sample_frequency=0)
#path = agent_pb2.Path(path="/components/", sample_frequency=5000)
## Multiple Sensor Subscriptions ...
path1 = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/state/", sample_frequency=5000)
path2 = agent_pb2.Path(path="/junos/events/event[id=\'UI_COMMIT\']", sample_frequency=0)
#path2 = agent_pb2.Path(path="/junos/events", sample_frequency=0)
# Use Path(s) to setup path_list ...
#path_list = [path]
path_list = [path1, path2]
logger.debug("Value of 'path_list':\n" + str(path_list))
# Use SubscriptionInput and path_list to setup SubscriptionRequest ...
subscription_request = agent_pb2.SubscriptionRequest(input=subscription_input,
path_list=path_list)
logger.info("Value of 'subscription_request':\n" + str(subscription_request))
# Define Kafka Endpoint
producer = None
if SOUTHBOUND_KAFKA_FLAG:
bootstrap_server = KAFKA_IP + ":" + KAFKA_PORT
logger.info("Value of 'bootstrap_server':" + bootstrap_server)
# Connect to Kafka as a Producer
logger.info("Connecting to Kafka as a Producer ...")
producer = KafkaProducer(bootstrap_servers=bootstrap_server)
logger.info("... Done!\n")
# Launch telemetry subscription request ...
for message in stub.telemetrySubscribe(subscription_request):
# Print each telemetry message to console.
print(message)
# Parse message and assemble contents in JSON format in preparation for Kafka push.
data = {}
data['system_id'] = message.system_id
data['component_id'] = message.component_id
data['sub_component_id'] = message.sub_component_id
data['path'] = message.path
data['sequence_number'] = message.sequence_number
data['timestamp'] = message.timestamp
# The telemetry data returned is a list of key-value pairs, where the value can be one of the following
# possible values: double_value, int_value, uint_value, sint_value, bool_value, str_value, bytes_value.
kv_pairs = []
for kv in message.kv:
key = kv.key
value_type = kv.WhichOneof('value')
if value_type == "double_value":
kv_pairs.append({
key: kv.double_value
})
if value_type == "int_value":
kv_pairs.append({
key: kv.int_value
})
if value_type == "uint_value":
kv_pairs.append({
key: kv.uint_value
})
if value_type == "sint_value":
kv_pairs.append({
key: kv.sint_value
})
if value_type == "bool_value":
kv_pairs.append({
key: kv.bool_value
})
if value_type == "str_value":
kv_pairs.append({
key: kv.str_value
})
if value_type == "bytes_value":
kv_pairs.append({
key: kv.bytes_value
})
data['kv_pairs'] = kv_pairs
#data['key'] = 'value'
# Encode the data in JSON and pretty-print it before firing it off to Kafka.
json_data = json.dumps(data, indent=3)
if SOUTHBOUND_KAFKA_FLAG:
# Publish message to Kafka bus.
# Route to an appropriate Kafka topic, based on the telemetry subscription path.
logger.info("Pushing message to Kafka ...")
if JTI_INTERFACES_SENSOR_SUBSTRING in message.path:
#producer.send(KAFKA_TOPIC_JUNIPER, json_data)
| producer.send(KAFKA_TOPIC_JUNIPER_INTERFACES, json_data)
#producer.send('juniper', json_data) | conditional_block |
|
jti-grpc-client-python.py | 88 888 888 Y88888 888
# Y88b d88P 888 888 888 888 Y8888 888
# "Y8888P" 88888888 8888888 8888888888 888 Y888 888
#
#
# > Script: jti-grpc-client-python.py
# > Author: Tech Mocha
# > Company: Tech Mocha
# > Version: 0.1
# > Revision Date: 2018-09-19
#
# ####################################################################
'''
----------[ U S A G E E X A M P L E S ]----------
(0) INSTALL PREREQUISITE PACKAGES:
> curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
> sudo python get-pip.py
> sudo pip install kafka-python
> sudo pip install requests
'''
# ----------[ IMPORTS ]----------
import argparse
import calendar
import xml.etree
import xml.etree.ElementTree as ET
import grpc
import json
import logging
import requests
import sys
import time
import xml.dom.minidom
from kafka import KafkaConsumer
from kafka import KafkaProducer
from logging.handlers import TimedRotatingFileHandler
from protos_compiled import agent_pb2
from protos_compiled import agent_pb2_grpc
from protos_compiled import authentication_service_pb2
from protos_compiled import authentication_service_pb2_grpc
# ----------[ GLOBAL CONSTANTS ]----------
ARGPARSER = "JTI-GRPC-CLIENT ARGUMENTS"
COLLECTOR_ADDRESS = "10.49.123.198"
COLLECTOR_PORT = 50051
# MX2 - OpenConfig 0.0.0.10 - gRPC
#DEVICE_IP = "10.49.126.157"
# MX3 - OpenConfig 0.0.0.9 - Native & Syslog
DEVICE_IP = "10.49.114.76"
# Paul Abbott's Router
#DEVICE_IP = "10.49.123.198"
DEVICE_PASSWORD = ""
DEVICE_PORT = "50051"
DEVICE_USERNAME = ""
INVOKE_GET_OPERATIONAL_STATE_FLAG = False
INVOKE_GET_DATA_ENCODINGS_FLAG = False
INVOKE_GET_SUBSCRIPTIONS_FLAG = False
JTI_INTERFACES_SENSOR_SUBSTRING = "/interfaces/interface"
JTI_SYSLOG_SENSOR_SUBSTRING = "/junos/events"
KAFKA_IP = "10.49.114.238"
KAFKA_PORT = "9092"
KAFKA_TOPIC_JUNIPER = "juniper"
KAFKA_TOPIC_JUNIPER_INTERFACES = "juniper_interfaces"
KAFKA_TOPIC_JUNIPER_SYSLOG = "juniper_syslog"
LOGFILE = "log/jti_grpc_client.log"
SOUTHBOUND_KAFKA_FLAG = False
def enum(**named_values):
return type('Enum', (), named_values)
VERBOSITY = enum(BRIEF='BRIEF', DETAIL='DETAIL', TERSE='TERSE')
# ----------[ GLOBAL VARIABLES ]----------
args = None
logger = None
# ----------[ FUNCTION 'main()' ]----------
def main(argv):
# Explicitly refer to the global variables to avoid creating a local variable that shadows the outer scope.
| # As per the .proto file, use 0xFFFFFFFF for all subscription identifiers including agent-level operational stats.
if INVOKE_GET_OPERATIONAL_STATE_FLAG:
logger.info("Invoking 'getTelemetryOperationalState()' ...")
get_oper_state_request = agent_pb2.GetOperationalStateRequest(subscription_id = 0xFFFFFFFF,
verbosity = agent_pb2.VerbosityLevel.Value(VERBOSITY.BRIEF))
get_oper_state_response = stub.getTelemetryOperationalState(get_oper_state_request)
logger.info(str(get_oper_state_response))
logger.info("... Done!\n")
# STEP 5: Return the set of data encodings supported by the device for telemetry data.
if INVOKE_GET_DATA_ENCODINGS_FLAG:
logger.info("Invoking 'getDataEncodings()' ...")
get_data_encoding_request = agent_pb2.DataEncodingRequest()
get_data_encoding_reply = stub.getDataEncodings(get_data_encoding_request)
logger.info(str(get_data_encoding_reply))
logger.info("... Done!\n")
# STEP 6: Get the list of current telemetry subscriptions from the target (this is one of the methods exposed via agent.proto).
# As per the .proto file, use 0xFFFFFFFF for all subscription identifiers.
if INVOKE_GET_SUBSCRIPTIONS_FLAG:
logger.info("Invoking 'getTelemetrySubscriptions()' ...")
get_subscriptions_request = agent_pb2.GetSubscriptionsRequest(subscription_id = 0xFFFFFFFF)
get_subscriptions_reply = stub.getTelemetrySubscriptions(get_subscriptions_request)
logger.info(str(get_subscriptions_reply))
logger.info("... Done!\n")
# STEP 7: The telemetrySubscribe() method requires a SubscriptionRequest object as an input, which in turn requires
# a SubscriptionInput object and a list of Path objects as input ... assemble these various objects.
# Setup Collector ...
collector = agent_pb2.Collector(address=COLLECTOR_ADDRESS, port=COLLECTOR_PORT)
logger.debug("Value of 'collector': " + str(collector))
# Use Collector to setup SubscriptionInput ...
subscription_input = agent_pb2.SubscriptionInput(collector_list=[collector])
logger.debug("Value of 'subscription_input':\n" + str(subscription_input))
# Setup Path ...
#path = agent_pb2.Path(path="/junos/system/linecard/interface/", sample_frequency=5000)
#path = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/", sample_frequency=5000)
#path = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/state/", sample_frequency=5000)
#path = agent_pb2.Path(path="/junos/events", sample_frequency=0)
#path = agent_pb2.Path(path="/junos/events/event[id=\'UI_COMMIT\']", sample_frequency=0)
#path = agent_pb2.Path(path="/components/", sample_frequency=5000)
## Multiple Sensor Subscriptions ...
path1 = agent_pb2.Path(path="/interfaces/interface[name='ge-0/0/0']/state/", sample_frequency=5000)
path2 = agent_pb2.Path(path="/junos/events/event[id=\'UI_COMMIT\']", sample_frequency=0)
#path2 = agent_pb2.Path(path="/junos/events", sample_frequency=0)
# Use Path(s) to setup path_list ...
#path_list = [path]
path_list = [path1, path2]
logger.debug("Value of 'path_list':\n" + str(path_list))
# Use SubscriptionInput and path_list to setup SubscriptionRequest ...
subscription_request = agent_pb2.SubscriptionRequest(input=subscription_input,
path_list=path_list)
logger.info("Value of 'subscription_request':\n" + str(subscription_request))
# Define Kafka Endpoint
producer = None
if SOUTHBOUND_KAFKA_FLAG:
bootstrap_server = KAFKA_IP + ":" + KAFKA_PORT
logger.info("Value of 'bootstrap_server':" + bootstrap_server)
# Connect to Kafka as a Producer
logger.info("Connecting to Kafka as a Producer ...")
producer = KafkaProducer(bootstrap_servers=bootstrap_server)
logger.info("... Done!\n")
# Launch telemetry subscription request ...
for message in stub.telemetrySubscribe(subscription_request):
# Print each telemetry message to console.
print(message)
# Parse message and assemble contents in JSON format in preparation for Kafka push.
data = {}
data['system_id'] = message.system_id
data['component_id'] = message.component_id
data['sub_component_id'] | global args
# STEP 1: Setup the logging so we can start to log debug info right away.
setupLogging()
logger.debug("\n\n\n--------------------[ JTI-GRPC-CLIENT: BEGIN EXECUTION ]--------------------")
# STEP 2: Parse the argument list with which this script was invoked and set global variable 'args'.
logger.debug("Number of arguments used in script invocation: " + str(len(argv)) + "\n")
logger.info("Full argument list: " + str(argv) + "\n")
#args = parseArguments()
# STEP 3: Create an insecure channel to the gRPC server running on the router, and use the channel to create
# the client stub
device_ip_and_port = DEVICE_IP + ":" + DEVICE_PORT
logger.info("Creating insecure channel to " + device_ip_and_port + " ...")
grpc_channel = grpc.insecure_channel(device_ip_and_port)
stub = agent_pb2_grpc.OpenConfigTelemetryStub(grpc_channel)
logger.info("... Done!\n")
# STEP 4: Get the Telemetry Agent operational states (this is one of the methods exposed via agent.proto). | identifier_body |
testing.js | Kill monsters within 64 pixels of the strike
this.monsterGroup.forEachAlive(function(monster) {
if (this.game.math.distance(
this.game.input.activePointer.x, this.game.input.activePointer.y,
monster.x, monster.y) < 64) {
monster.frame = 2; // Show the "dead" texture
monster.body.velocity.y = this.game.rnd.integerInRange(-600, -1200);
monster.body.velocity.x = this.game.rnd.integerInRange(-500, 500);
monster.body.acceleration.y = 3000;
monster.angle = 180;
Config.killed.code++;
// Create an explosion
this.getExplosion(monster.x, monster.y);
}
}, this);
this.bugGroup.forEachAlive(function(bug) {
if (this.game.math.distance(
this.game.input.activePointer.x, this.game.input.activePointer.y,
bug.x, bug.y) < 64) {
bug.frame = 2; // Show the "dead" texture
bug.body.velocity.y = this.game.rnd.integerInRange(-600, -1200);
bug.body.velocity.x = this.game.rnd.integerInRange(-500, 500);
bug.body.acceleration.y = 3000;
bug.angle = 180;
Config.killed.bugs++;
// Create an explosion
this.getExplosion(bug.x, bug.y);
}
}, this);
// Rotate the lightning sprite so it goes in the
// direction of the pointer
this.lightning.rotation =
this.game.math.angleBetween(
this.lightning.x, this.lightning.y,
this.game.input.activePointer.x, this.game.input.activePointer.y
) - Math.PI / 2;
// Calculate the distance from the lightning source to the pointer
var distance = this.game.math.distance(
this.lightning.x, this.lightning.y,
this.game.input.activePointer.x, this.game.input.activePointer.y
);
// Create the lightning texture
this.createLightningTexture(this.lightningBitmap.width / 2, 0, 20, 3, false, distance);
// Make the lightning sprite visible
this.lightning.alpha = 1;
// Fade out the lightning sprite using a tween on the alpha property.
// Check out the "Easing function" examples for more info.
this.game.add.tween(this.lightning)
.to({
alpha: 0.5
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 1.0
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 0.5
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 1.0
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 0
}, 250, Phaser.Easing.Cubic.In)
.start();
// Create the flash
this.flash.alpha = 1;
this.game.add.tween(this.flash)
.to({
alpha: 0
}, 100, Phaser.Easing.Cubic.In)
.start();
// Shake the camera by moving it up and down 5 times really fast
this.game.camera.y = 0;
this.game.add.tween(this.game.camera)
.to({
y: -10
}, 40, Phaser.Easing.Sinusoidal.InOut, false, 0, 5, true)
.start();
}
};
GameState.prototype.createNewMonster = function() {
var monster = this.monsterGroup.getFirstDead(); // Recycle a dead monster
if (monster) {
monster.reset(this.game.width + 100, this.game.height - 48); // Position on ground
monster.revive(); // Set "alive"
monster.body.velocity.setTo(0, 0); // Stop moving
monster.body.acceleration.setTo(0, 0); // Stop accelerating
monster.body.velocity.x = -100; // Move left
monster.rotation = 0; // Reset rotation
monster.frame = 0; // Set animation frame to 0
monster.anchor.setTo(0.5, 0.5); // Center texture
monster.animations.add('walk', [0, 1], 2);
monster.animations.play('walk', 6, true);
}
};
GameState.prototype.createNewBug = function() {
var bug = this.bugGroup.getFirstDead(); // Recycle a dead monster
if (bug) {
//this.game.width +
var ground_x = this.game.width + Math.round(Math.random() * this.game.width);
//console.log('ground_x', ground_x);
bug.reset(ground_x, this.game.height - 48); // Position on ground
bug.revive(); // Set "alive"
bug.body.velocity.setTo(0, 0); // Stop moving
bug.body.acceleration.setTo(0, 0); // Stop accelerating
bug.body.velocity.x = -300; // Move left
bug.rotation = 0; // Reset rotation
bug.frame = 0; // Set animation frame to 0
bug.anchor.setTo(0.5, 0.5); // Center texture
bug.animations.add('walk', [0, 1]);
bug.animations.play('walk', 6, true);
}
};
// Try to get a used explosion from the explosionGroup.
// If an explosion isn't available, create a new one and add it to the group.
// Setup new explosions so that they animate and kill themselves when the
// animation is complete.
GameState.prototype.getExplosion = function(x, y) {
// Get the first dead explosion from the explosionGroup
var explosion = this.explosionGroup.getFirstDead();
// If there aren't any available, create a new one
if (explosion === null) {
explosion = this.game.add.sprite(0, 0, 'explosion');
explosion.anchor.setTo(0.5, 0.5);
// Add an animation for the explosion that kills the sprite when the
// animation is complete. Plays the first frame several times to make the
// explosion more visible after the screen flash.
var animation = explosion.animations.add('boom', [0, 0, 0, 0, 1, 2, 3], 60, false);
animation.killOnComplete = true;
// Add the explosion sprite to the group
this.explosionGroup.add(explosion);
}
//console.log('boom');
// Revive the explosion (set it's alive property to true)
// You can also define a onRevived event handler in your explosion objects
// to do stuff when they are revived.
explosion.revive();
// Move the explosion to the given coordinates
explosion.x = x;
explosion.y = y;
// Set rotation of the explosion at random for a little variety
explosion.angle = this.game.rnd.integerInRange(0, 360);
// Play the animation
explosion.animations.play('boom');
// Return the explosion itself in case we want to do anything else with it
return explosion;
};
// This function creates a texture that looks like a lightning bolt
GameState.prototype.createLightningTexture = function(x, y, segments, boltWidth, branch, distance) {
// Get the canvas drawing context for the lightningBitmap
var ctx = this.lightningBitmap.context;
var width = this.lightningBitmap.width;
var height = this.lightningBitmap.height;
// Our lightning will be made up of several line segments starting at
// the center of the top edge of the bitmap and ending at the target.
// Clear the canvas
if (!branch) ctx.clearRect(0, 0, width, height);
// Draw each of the segments
for (var i = 0; i < segments; i++) {
// Set the lightning color and bolt width
ctx.strokeStyle = 'rgb(255, 255, 255)';
ctx.lineWidth = boltWidth;
ctx.beginPath();
ctx.moveTo(x, y);
// Calculate an x offset from the end of the last line segment and
// keep it within the bounds of the bitmap
if (branch) {
// For a branch
x += this.game.rnd.integerInRange(-10, 10);
} else {
// For the main bolt
x += this.game.rnd.integerInRange(-30, 30);
}
if (x <= 10) x = 10;
if (x >= width - 10) x = width - 10;
// Calculate a y offset from the end of the last line segment.
// When we've reached the target or there are no more segments left,
// set the y position to the distance to the target. For branches, we
// don't care if they reach the target so don't set the last coordinate
// to the target if it's hanging in the air.
if (branch) | {
// For a branch
y += this.game.rnd.integerInRange(10, 20);
} | conditional_block |
|
testing.js | // Show FPS
this.game.time.advancedTiming = true;
this.highscoreText = this.game.add.text(
this.game.width - 240, 20, '', {
font: '16px Arial',
fill: '#ffffff'
}
);
this.timerText = this.game.add.text(20, 20, '', {
font: '16px Arial',
fill: '#ffffff'
});
this.cooldownText = this.game.add.text(200, 20, '', {
font: '16px Arial',
fill: '#ffffff'
});
// Create our Timer
this.cooldownTimer = game.time.create(true);
// Set a TimerEvent to occur after 2 seconds
this.cooldownTimer.loop(Config.cooldown, timeCooldownFn, this);
//this.cooldownTimer.start();
// Create our Timer
this.timer = game.time.create(true);
// Set a TimerEvent to occur after 2 seconds
this.timer.loop(this.TIMER_MAX, timeIsOut, this);
// Start the timer running - this is important!
// It won't start automatically, allowing you to hook it to button events and the like.
this.timer.start();
stateText = game.add.text(game.world.centerX, game.world.centerY, ' ', {
font: '84px Arial',
fill: '#fff'
});
stateText.anchor.setTo(0.5, 0.5);
stateText.visible = false;
};
function timeCooldownFn() {
this.cooldownTimer.paused = true;
//cooldownText
}
function timeIsOut() {
var codestat = Config.pathed.code;
var bugstat = Config.killed.bugs - Config.pathed.bugs;;
stateText.text = " Time is up!\n Pathed Code: " + codestat.toString() + "\n Catched Buds: " + bugstat.toString();
stateText.visible = true;
this.game.paused = true;
window["GameStage7"].game.score = codestat + bugstat;
window["GameStage7"].game.finished = true;
$scope.$digest();
finishGame();
};
function | (game) {
minutes = Math.floor(game.timer.duration.toFixed(0) / 60000) % 60;
seconds = Math.floor(game.timer.duration.toFixed(0) / 1000) % 60;
milliseconds = Math.floor(game.timer.duration.toFixed(0)) % 100;
//If any of the digits becomes a single digit number, pad it with a zero
if (milliseconds < 10)
milliseconds = '0' + milliseconds;
if (seconds < 10)
seconds = '0' + seconds;
if (minutes < 10)
minutes = '0' + minutes;
game.timerText.setText('Time last: ' + minutes + ':' + seconds + ':' + milliseconds);
}
function updateCooldown(game) {
minutes = Math.floor(game.cooldownTimer.duration.toFixed(0) / 60000) % 60;
seconds = Math.floor(game.cooldownTimer.duration.toFixed(0) / 1000) % 60;
milliseconds = Math.floor(game.cooldownTimer.duration.toFixed(0)) % 100;
//If any of the digits becomes a single digit number, pad it with a zero
if (milliseconds < 10)
milliseconds = '0' + milliseconds;
if (seconds < 10)
seconds = '0' + seconds;
if (minutes < 10)
minutes = '0' + minutes;
game.cooldownText.setText('Cooldown: ' + minutes + ':' + seconds + ':' + milliseconds);
}
// The update() method is called every frame
GameState.prototype.update = function() {
updateTimer(this);
updateCooldown(this);
var codestat = Config.pathed.code;
var bugstat = Config.killed.bugs - Config.pathed.bugs;
this.highscoreText.setText("Highscore: bugs catched: " + bugstat + '\n code saved: ' + codestat);
//this.cooldownText.setText("Cooldown");
// Spawn a new monster
this.monsterTimer -= this.game.time.elapsed;
if (this.monsterTimer <= 0) {
this.monsterTimer = this.game.rnd.integerInRange(150, 500);
this.createNewMonster();
}
// Spawn a new bug
this.bugTimer -= this.game.time.elapsed;
if (this.bugTimer <= 0) {
this.bugTimer = this.game.rnd.integerInRange(150, 500);
this.createNewBug();
}
// Kill monsters when they go off screen
this.monsterGroup.forEachAlive(function(monster) {
//this.monsterTimer -= this.game.time.elapsed;
/*if (this.monsterTimer <= 0) {
this.monsterTimer = this.game.rnd.integerInRange(150, 500);
if(monster.frame == 0) monster.frame = 1
else monster.frame = 0;
}*/
if (monster.x < -64) {
if (monster.frame != 2) Config.pathed.code++;
monster.kill();
}
if (monster.y > this.game.height) {
monster.kill();
}
}, this);
this.bugGroup.forEachAlive(function(bug) {
/*this.bugTimer -= this.game.time.elapsed;
if (this.bugTimer <= 0) {
this.bugTimer = this.game.rnd.integerInRange(150, 500);
if(bug.frame == 0) bug.frame = 1
else bug.frame = 0;
}*/
if (bug.x < -64) {
if (bug.frame != 2) Config.pathed.bugs++;
bug.kill();
}
if (bug.y > this.game.height) {
bug.kill();
}
}, this);
// Create lightning
if (this.game.input.activePointer.justPressed(20) && (!this.cooldownTimer.running || this.cooldownTimer.paused)) {
if (!this.cooldownTimer.running)
this.cooldownTimer.start();
if (this.cooldownTimer.paused)
this.cooldownTimer.paused = false;
// Kill monsters within 64 pixels of the strike
this.monsterGroup.forEachAlive(function(monster) {
if (this.game.math.distance(
this.game.input.activePointer.x, this.game.input.activePointer.y,
monster.x, monster.y) < 64) {
monster.frame = 2; // Show the "dead" texture
monster.body.velocity.y = this.game.rnd.integerInRange(-600, -1200);
monster.body.velocity.x = this.game.rnd.integerInRange(-500, 500);
monster.body.acceleration.y = 3000;
monster.angle = 180;
Config.killed.code++;
// Create an explosion
this.getExplosion(monster.x, monster.y);
}
}, this);
this.bugGroup.forEachAlive(function(bug) {
if (this.game.math.distance(
this.game.input.activePointer.x, this.game.input.activePointer.y,
bug.x, bug.y) < 64) {
bug.frame = 2; // Show the "dead" texture
bug.body.velocity.y = this.game.rnd.integerInRange(-600, -1200);
bug.body.velocity.x = this.game.rnd.integerInRange(-500, 500);
bug.body.acceleration.y = 3000;
bug.angle = 180;
Config.killed.bugs++;
// Create an explosion
this.getExplosion(bug.x, bug.y);
}
}, this);
// Rotate the lightning sprite so it goes in the
// direction of the pointer
this.lightning.rotation =
this.game.math.angleBetween(
this.lightning.x, this.lightning.y,
this.game.input.activePointer.x, this.game.input.activePointer.y
) - Math.PI / 2;
// Calculate the distance from the lightning source to the pointer
var distance = this.game.math.distance(
this.lightning.x, this.lightning.y,
this.game.input.activePointer.x, this.game.input.activePointer.y
);
// Create the lightning texture
this.createLightningTexture(this.lightningBitmap.width / 2, 0, 20, 3, false, distance);
// Make the lightning sprite visible
this.lightning.alpha = 1;
// Fade out the lightning sprite using a tween on the alpha property.
// Check out the "Easing function" examples for more info.
this.game.add.tween(this.lightning)
.to({
alpha: 0.5
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 1.0
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 0.5
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 1.0
}, 100, Phaser.Easing.Bounce.Out)
.to({
| updateTimer | identifier_name |
testing.js | // Show FPS
this.game.time.advancedTiming = true;
this.highscoreText = this.game.add.text(
this.game.width - 240, 20, '', {
font: '16px Arial',
fill: '#ffffff'
}
);
this.timerText = this.game.add.text(20, 20, '', {
font: '16px Arial',
fill: '#ffffff'
});
this.cooldownText = this.game.add.text(200, 20, '', {
font: '16px Arial',
fill: '#ffffff'
});
// Create our Timer
this.cooldownTimer = game.time.create(true);
// Set a TimerEvent to occur after 2 seconds
this.cooldownTimer.loop(Config.cooldown, timeCooldownFn, this);
//this.cooldownTimer.start();
// Create our Timer
this.timer = game.time.create(true);
// Set a TimerEvent to occur after 2 seconds
this.timer.loop(this.TIMER_MAX, timeIsOut, this);
// Start the timer running - this is important!
// It won't start automatically, allowing you to hook it to button events and the like.
this.timer.start();
stateText = game.add.text(game.world.centerX, game.world.centerY, ' ', {
font: '84px Arial',
fill: '#fff'
});
stateText.anchor.setTo(0.5, 0.5);
stateText.visible = false;
}; | this.cooldownTimer.paused = true;
//cooldownText
}
function timeIsOut() {
var codestat = Config.pathed.code;
var bugstat = Config.killed.bugs - Config.pathed.bugs;;
stateText.text = " Time is up!\n Pathed Code: " + codestat.toString() + "\n Catched Buds: " + bugstat.toString();
stateText.visible = true;
this.game.paused = true;
window["GameStage7"].game.score = codestat + bugstat;
window["GameStage7"].game.finished = true;
$scope.$digest();
finishGame();
};
function updateTimer(game) {
minutes = Math.floor(game.timer.duration.toFixed(0) / 60000) % 60;
seconds = Math.floor(game.timer.duration.toFixed(0) / 1000) % 60;
milliseconds = Math.floor(game.timer.duration.toFixed(0)) % 100;
//If any of the digits becomes a single digit number, pad it with a zero
if (milliseconds < 10)
milliseconds = '0' + milliseconds;
if (seconds < 10)
seconds = '0' + seconds;
if (minutes < 10)
minutes = '0' + minutes;
game.timerText.setText('Time last: ' + minutes + ':' + seconds + ':' + milliseconds);
}
function updateCooldown(game) {
minutes = Math.floor(game.cooldownTimer.duration.toFixed(0) / 60000) % 60;
seconds = Math.floor(game.cooldownTimer.duration.toFixed(0) / 1000) % 60;
milliseconds = Math.floor(game.cooldownTimer.duration.toFixed(0)) % 100;
//If any of the digits becomes a single digit number, pad it with a zero
if (milliseconds < 10)
milliseconds = '0' + milliseconds;
if (seconds < 10)
seconds = '0' + seconds;
if (minutes < 10)
minutes = '0' + minutes;
game.cooldownText.setText('Cooldown: ' + minutes + ':' + seconds + ':' + milliseconds);
}
// The update() method is called every frame
GameState.prototype.update = function() {
updateTimer(this);
updateCooldown(this);
var codestat = Config.pathed.code;
var bugstat = Config.killed.bugs - Config.pathed.bugs;
this.highscoreText.setText("Highscore: bugs catched: " + bugstat + '\n code saved: ' + codestat);
//this.cooldownText.setText("Cooldown");
// Spawn a new monster
this.monsterTimer -= this.game.time.elapsed;
if (this.monsterTimer <= 0) {
this.monsterTimer = this.game.rnd.integerInRange(150, 500);
this.createNewMonster();
}
// Spawn a new bug
this.bugTimer -= this.game.time.elapsed;
if (this.bugTimer <= 0) {
this.bugTimer = this.game.rnd.integerInRange(150, 500);
this.createNewBug();
}
// Kill monsters when they go off screen
this.monsterGroup.forEachAlive(function(monster) {
//this.monsterTimer -= this.game.time.elapsed;
/*if (this.monsterTimer <= 0) {
this.monsterTimer = this.game.rnd.integerInRange(150, 500);
if(monster.frame == 0) monster.frame = 1
else monster.frame = 0;
}*/
if (monster.x < -64) {
if (monster.frame != 2) Config.pathed.code++;
monster.kill();
}
if (monster.y > this.game.height) {
monster.kill();
}
}, this);
this.bugGroup.forEachAlive(function(bug) {
/*this.bugTimer -= this.game.time.elapsed;
if (this.bugTimer <= 0) {
this.bugTimer = this.game.rnd.integerInRange(150, 500);
if(bug.frame == 0) bug.frame = 1
else bug.frame = 0;
}*/
if (bug.x < -64) {
if (bug.frame != 2) Config.pathed.bugs++;
bug.kill();
}
if (bug.y > this.game.height) {
bug.kill();
}
}, this);
// Create lightning
if (this.game.input.activePointer.justPressed(20) && (!this.cooldownTimer.running || this.cooldownTimer.paused)) {
if (!this.cooldownTimer.running)
this.cooldownTimer.start();
if (this.cooldownTimer.paused)
this.cooldownTimer.paused = false;
// Kill monsters within 64 pixels of the strike
this.monsterGroup.forEachAlive(function(monster) {
if (this.game.math.distance(
this.game.input.activePointer.x, this.game.input.activePointer.y,
monster.x, monster.y) < 64) {
monster.frame = 2; // Show the "dead" texture
monster.body.velocity.y = this.game.rnd.integerInRange(-600, -1200);
monster.body.velocity.x = this.game.rnd.integerInRange(-500, 500);
monster.body.acceleration.y = 3000;
monster.angle = 180;
Config.killed.code++;
// Create an explosion
this.getExplosion(monster.x, monster.y);
}
}, this);
this.bugGroup.forEachAlive(function(bug) {
if (this.game.math.distance(
this.game.input.activePointer.x, this.game.input.activePointer.y,
bug.x, bug.y) < 64) {
bug.frame = 2; // Show the "dead" texture
bug.body.velocity.y = this.game.rnd.integerInRange(-600, -1200);
bug.body.velocity.x = this.game.rnd.integerInRange(-500, 500);
bug.body.acceleration.y = 3000;
bug.angle = 180;
Config.killed.bugs++;
// Create an explosion
this.getExplosion(bug.x, bug.y);
}
}, this);
// Rotate the lightning sprite so it goes in the
// direction of the pointer
this.lightning.rotation =
this.game.math.angleBetween(
this.lightning.x, this.lightning.y,
this.game.input.activePointer.x, this.game.input.activePointer.y
) - Math.PI / 2;
// Calculate the distance from the lightning source to the pointer
var distance = this.game.math.distance(
this.lightning.x, this.lightning.y,
this.game.input.activePointer.x, this.game.input.activePointer.y
);
// Create the lightning texture
this.createLightningTexture(this.lightningBitmap.width / 2, 0, 20, 3, false, distance);
// Make the lightning sprite visible
this.lightning.alpha = 1;
// Fade out the lightning sprite using a tween on the alpha property.
// Check out the "Easing function" examples for more info.
this.game.add.tween(this.lightning)
.to({
alpha: 0.5
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 1.0
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 0.5
}, 100, Phaser.Easing.Bounce.Out)
.to({
alpha: 1.0
}, 100, Phaser.Easing.Bounce.Out)
.to({
|
function timeCooldownFn() { | random_line_split |
testing.js | : -10
}, 40, Phaser.Easing.Sinusoidal.InOut, false, 0, 5, true)
.start();
}
};
GameState.prototype.createNewMonster = function() {
var monster = this.monsterGroup.getFirstDead(); // Recycle a dead monster
if (monster) {
monster.reset(this.game.width + 100, this.game.height - 48); // Position on ground
monster.revive(); // Set "alive"
monster.body.velocity.setTo(0, 0); // Stop moving
monster.body.acceleration.setTo(0, 0); // Stop accelerating
monster.body.velocity.x = -100; // Move left
monster.rotation = 0; // Reset rotation
monster.frame = 0; // Set animation frame to 0
monster.anchor.setTo(0.5, 0.5); // Center texture
monster.animations.add('walk', [0, 1], 2);
monster.animations.play('walk', 6, true);
}
};
GameState.prototype.createNewBug = function() {
var bug = this.bugGroup.getFirstDead(); // Recycle a dead monster
if (bug) {
//this.game.width +
var ground_x = this.game.width + Math.round(Math.random() * this.game.width);
//console.log('ground_x', ground_x);
bug.reset(ground_x, this.game.height - 48); // Position on ground
bug.revive(); // Set "alive"
bug.body.velocity.setTo(0, 0); // Stop moving
bug.body.acceleration.setTo(0, 0); // Stop accelerating
bug.body.velocity.x = -300; // Move left
bug.rotation = 0; // Reset rotation
bug.frame = 0; // Set animation frame to 0
bug.anchor.setTo(0.5, 0.5); // Center texture
bug.animations.add('walk', [0, 1]);
bug.animations.play('walk', 6, true);
}
};
// Try to get a used explosion from the explosionGroup.
// If an explosion isn't available, create a new one and add it to the group.
// Setup new explosions so that they animate and kill themselves when the
// animation is complete.
GameState.prototype.getExplosion = function(x, y) {
// Get the first dead explosion from the explosionGroup
var explosion = this.explosionGroup.getFirstDead();
// If there aren't any available, create a new one
if (explosion === null) {
explosion = this.game.add.sprite(0, 0, 'explosion');
explosion.anchor.setTo(0.5, 0.5);
// Add an animation for the explosion that kills the sprite when the
// animation is complete. Plays the first frame several times to make the
// explosion more visible after the screen flash.
var animation = explosion.animations.add('boom', [0, 0, 0, 0, 1, 2, 3], 60, false);
animation.killOnComplete = true;
// Add the explosion sprite to the group
this.explosionGroup.add(explosion);
}
//console.log('boom');
// Revive the explosion (set it's alive property to true)
// You can also define a onRevived event handler in your explosion objects
// to do stuff when they are revived.
explosion.revive();
// Move the explosion to the given coordinates
explosion.x = x;
explosion.y = y;
// Set rotation of the explosion at random for a little variety
explosion.angle = this.game.rnd.integerInRange(0, 360);
// Play the animation
explosion.animations.play('boom');
// Return the explosion itself in case we want to do anything else with it
return explosion;
};
// This function creates a texture that looks like a lightning bolt
GameState.prototype.createLightningTexture = function(x, y, segments, boltWidth, branch, distance) {
// Get the canvas drawing context for the lightningBitmap
var ctx = this.lightningBitmap.context;
var width = this.lightningBitmap.width;
var height = this.lightningBitmap.height;
// Our lightning will be made up of several line segments starting at
// the center of the top edge of the bitmap and ending at the target.
// Clear the canvas
if (!branch) ctx.clearRect(0, 0, width, height);
// Draw each of the segments
for (var i = 0; i < segments; i++) {
// Set the lightning color and bolt width
ctx.strokeStyle = 'rgb(255, 255, 255)';
ctx.lineWidth = boltWidth;
ctx.beginPath();
ctx.moveTo(x, y);
// Calculate an x offset from the end of the last line segment and
// keep it within the bounds of the bitmap
if (branch) {
// For a branch
x += this.game.rnd.integerInRange(-10, 10);
} else {
// For the main bolt
x += this.game.rnd.integerInRange(-30, 30);
}
if (x <= 10) x = 10;
if (x >= width - 10) x = width - 10;
// Calculate a y offset from the end of the last line segment.
// When we've reached the target or there are no more segments left,
// set the y position to the distance to the target. For branches, we
// don't care if they reach the target so don't set the last coordinate
// to the target if it's hanging in the air.
if (branch) {
// For a branch
y += this.game.rnd.integerInRange(10, 20);
} else {
// For the main bolt
y += this.game.rnd.integerInRange(20, distance / segments);
}
if ((!branch && i == segments - 1) || y > distance) {
// This causes the bolt to always terminate at the center
// lightning bolt bounding box at the correct distance to
// the target. Because of the way the lightning sprite is
// rotated, this causes this point to be exactly where the
// player clicked or tapped.
y = distance;
if (!branch) x = width / 2;
}
// Draw the line segment
ctx.lineTo(x, y);
ctx.stroke();
// Quit when we've reached the target
if (y >= distance) break;
// Draw a branch 20% of the time off the main bolt only
if (!branch) {
if (this.game.math.chanceRoll(20)) {
// Draws another, thinner, bolt starting from this position
this.createLightningTexture(x, y, 10, 1, true, distance);
}
}
}
// This just tells the engine it should update the texture cache
this.lightningBitmap.dirty = true;
};
// Fragment shaders are small programs that run on the graphics card and alter
// the pixels of a texture. Every framework implements shaders differently but
// the concept is the same. This shader takes the lightning texture and alters
// the pixels so that it appears to be glowing. Shader programming itself is
// beyond the scope of this tutorial.
//
// There are a ton of good resources out there to learn it. Odds are that your
// framework already includes many of the most popular shaders out of the box.
//
// This is an OpenGL/WebGL feature. Because it runs in your web browser
// you need a browser that support WebGL for this to work.
Phaser.Filter.Glow = function(game) {
Phaser.Filter.call(this, game);
this.fragmentSrc = [
"precision lowp float;",
"varying vec2 vTextureCoord;",
"varying vec4 vColor;",
'uniform sampler2D uSampler;',
'void main() {',
'vec4 sum = vec4(0);',
'vec2 texcoord = vTextureCoord;',
'for(int xx = -4; xx <= 4; xx++) {',
'for(int yy = -3; yy <= 3; yy++) {',
'float dist = sqrt(float(xx*xx) + float(yy*yy));',
'float factor = 0.0;',
'if (dist == 0.0) {',
'factor = 2.0;',
'} else {',
'factor = 2.0/abs(float(dist));',
'}',
'sum += texture2D(uSampler, texcoord + vec2(xx, yy) * 0.002) * factor;',
'}',
'}',
'gl_FragColor = sum * 0.025 + texture2D(uSampler, texcoord);',
'}'
];
};
Phaser.Filter.Glow.prototype = Object.create(Phaser.Filter.prototype);
Phaser.Filter.Glow.prototype.constructor = Phaser.Filter.Glow;
function init(scope, finish) {
game = new Phaser.Game(1200, 600, Phaser.AUTO, 'game');
game.state.add('game', GameState, true);
$scope = scope;
finishGame = finish;
};
function destroy() | {} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.