Column schema (name: type, observed range):
author: int64 (658 to 755k)
date: stringlengths (19 to 19)
timezone: int64 (-46,800 to 43.2k)
hash: stringlengths (40 to 40)
message: stringlengths (5 to 490)
mods: list
language: stringclasses (20 values)
license: stringclasses (3 values)
repo: stringlengths (5 to 68)
original_message: stringlengths (12 to 491)
259,853
27.05.2020 10:16:09
25,200
17d500f907c7bfc4d8365cceb4b305e9e3e6733c
g3doc/fuse: add more references
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/g3doc/fuse.md", "new_path": "pkg/sentry/fs/g3doc/fuse.md", "diff": "@@ -99,7 +99,7 @@ ops can be implemented in parallel.\n## FUSE Protocol\nThe FUSE protocol is a request-response protocol. All requests are initiated by\n-the client. The wire-format for the protocol is raw c structs serialized to\n+the client. The wire-format for the protocol is raw C structs serialized to\nmemory.\nAll FUSE requests begin with the following request header:\n@@ -255,6 +255,7 @@ I/O syscalls like `read(2)`, `write(2)` and `mmap(2)`.\n# References\n-- `fuse(4)` manpage.\n-- Linux kernel FUSE documentation:\n- https://www.kernel.org/doc/html/latest/filesystems/fuse.html\n+- [fuse(4) Linux manual page](https://www.man7.org/linux/man-pages/man4/fuse.4.html)\n+- [Linux kernel FUSE documentation](https://www.kernel.org/doc/html/latest/filesystems/fuse.html)\n+- [The reference implementation of the Linux FUSE (Filesystem in Userspace) interface](https://github.com/libfuse/libfuse)\n+- [The kernel interface of FUSE](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fuse.h)\n" } ]
Go
Apache License 2.0
google/gvisor
g3doc/fuse: add more references
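The diff in the record above tightens the FUSE protocol description ("raw C structs serialized to memory") and links the kernel's `include/uapi/linux/fuse.h`. As a hedged illustration of that wire format, here is a Go sketch of the fixed-size header every FUSE request begins with, mirroring `fuse_in_header` from the referenced kernel header; the Go type and field names are assumptions for illustration, not gVisor's actual definition.

```go
package fuse

// fuseInHeader mirrors struct fuse_in_header from include/uapi/linux/fuse.h.
// Every FUSE request on the /dev/fuse wire starts with this header, followed
// by an opcode-specific payload, laid out exactly as the raw C struct.
type fuseInHeader struct {
	Len     uint32 // total request length, header included
	Opcode  uint32 // which FUSE operation this request carries
	Unique  uint64 // request ID echoed back in the matching response header
	NodeID  uint64 // inode number the operation targets
	UID     uint32 // credentials of the requesting process
	GID     uint32
	PID     uint32
	Padding uint32
}
```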
259,858
27.05.2020 10:47:42
25,200
0bc022b7f3c13bb7c5c8d47d1781820161e7b1ad
Fix push for Go branch.
[ { "change_type": "MODIFY", "old_path": ".github/workflows/go.yml", "new_path": ".github/workflows/go.yml", "diff": "@@ -19,6 +19,12 @@ jobs:\n\"${{ github.event.pull_request.statuses_url }}\"\nif: github.event_name == 'pull_request'\n- uses: actions/checkout@v2\n+ if: github.event_name == 'push'\n+ with:\n+ fetch-depth: 0\n+ token: '${{ secrets.GO_TOKEN }}'\n+ - uses: actions/checkout@v2\n+ if: github.event_name == 'pull_request'\nwith:\nfetch-depth: 0\n- uses: actions/setup-go@v2\n@@ -42,10 +48,6 @@ jobs:\n- run: go build ./...\n- if: github.event_name == 'push'\nrun: |\n- # Required dedicated credentials for the Go branch, due to the way\n- # branch protection rules are configured.\n- git config --global credential.helper cache\n- echo -e \"protocol=https\\nhost=github.com\\nusername=${{ secrets.GO_TOKEN }}\\npassword=x-oauth-basic\" | git credential approve\ngit remote add upstream \"https://github.com/${{ github.repository }}\"\ngit push upstream go:go\n- if: ${{ success() && github.event_name == 'pull_request' }}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix push for Go branch. PiperOrigin-RevId: 313419745
259,858
27.05.2020 15:46:07
25,200
26bbecf00f51e8dca60ce8b1d33f7ebbbf92d268
Ensure sitemap is generated.
[ { "change_type": "MODIFY", "old_path": "images/jekyll/Dockerfile", "new_path": "images/jekyll/Dockerfile", "diff": "@@ -8,5 +8,6 @@ RUN gem install \\\njekyll-paginate:1.1.0 \\\nkramdown-parser-gfm:1.1.0 \\\njekyll-relative-links:0.6.1 \\\n- jekyll-feed:0.13.0\n+ jekyll-feed:0.13.0 \\\n+ jekyll-sitemap:1.4.0\nCMD [\"/usr/gem/gems/jekyll-4.0.0/exe/jekyll\", \"build\", \"-t\", \"-s\", \"/input\", \"-d\", \"/output\"]\n" }, { "change_type": "MODIFY", "old_path": "website/_config.yml", "new_path": "website/_config.yml", "diff": "@@ -12,6 +12,7 @@ plugins:\n- jekyll-inline-svg\n- jekyll-relative-links\n- jekyll-feed\n+ - jekyll-sitemap\nsite_url: https://gvisor.dev\nfeed:\npath: blog/index.xml\n" } ]
Go
Apache License 2.0
google/gvisor
Ensure sitemap is generated. PiperOrigin-RevId: 313478820
259,854
28.05.2020 14:42:01
25,200
7b79370c105b28a1cffa2d12d81898fc6b278728
Add pcap logging to pcaketimpact. This makes debugging packetimpact tests much easier.
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/packetimpact_test.go", "new_path": "test/packetimpact/runner/packetimpact_test.go", "diff": "@@ -18,9 +18,12 @@ package packetimpact_test\nimport (\n\"flag\"\n\"fmt\"\n+ \"io/ioutil\"\n\"log\"\n\"math/rand\"\n\"net\"\n+ \"os\"\n+ \"os/exec\"\n\"path\"\n\"strings\"\n\"testing\"\n@@ -117,10 +120,18 @@ func TestOne(t *testing.T) {\n}(dn)\n}\n+ tmpDir, err := ioutil.TempDir(\"\", \"container-output\")\n+ if err != nil {\n+ t.Fatal(\"creating temp dir:\", err)\n+ }\n+ defer os.RemoveAll(tmpDir)\n+\n+ const testOutputDir = \"/tmp/testoutput\"\n+\nrunOpts := dockerutil.RunOpts{\nImage: \"packetimpact\",\nCapAdd: []string{\"NET_ADMIN\"},\n- Extra: []string{\"--sysctl\", \"net.ipv6.conf.all.disable_ipv6=0\", \"--rm\"},\n+ Extra: []string{\"--sysctl\", \"net.ipv6.conf.all.disable_ipv6=0\", \"--rm\", \"-v\", tmpDir + \":\" + testOutputDir},\nForeground: true,\n}\n@@ -187,7 +198,10 @@ func TestOne(t *testing.T) {\n// Run tcpdump in the test bench unbuffered, without DNS resolution, just on\n// the interface with the test packets.\nsnifferArgs := []string{\n- \"tcpdump\", \"-S\", \"-vvv\", \"-U\", \"-n\", \"-i\", testNetDev,\n+ \"tcpdump\",\n+ \"-S\", \"-vvv\", \"-U\", \"-n\",\n+ \"-i\", testNetDev,\n+ \"-w\", testOutputDir + \"/dump.pcap\",\n}\nsnifferRegex := \"tcpdump: listening.*\\n\"\nif *tshark {\n@@ -201,6 +215,12 @@ func TestOne(t *testing.T) {\nsnifferRegex = \"Capturing on.*\\n\"\n}\n+ defer func() {\n+ if err := exec.Command(\"/bin/cp\", \"-r\", tmpDir, os.Getenv(\"TEST_UNDECLARED_OUTPUTS_DIR\")).Run(); err != nil {\n+ t.Error(\"unable to copy container output files:\", err)\n+ }\n+ }()\n+\nif err := testbench.Create(runOpts, snifferArgs...); err != nil {\nt.Fatalf(\"unable to create container %s: %s\", testbench.Name, err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Add pcap logging to pcaketimpact. This makes debugging packetimpact tests much easier. PiperOrigin-RevId: 313662654
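The change in this record bind-mounts a host temp directory into the sniffer container, points tcpdump at a pcap file inside it with `-w`, and copies the result into Bazel's undeclared-outputs directory on teardown. Below is a minimal standalone Go sketch of that pattern, assuming an interface name of `eth1` and using only the standard library; it is illustrative, not the gVisor test code itself.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
)

func main() {
	// Host-side scratch directory; the diff mounts this into the container
	// as /tmp/testoutput.
	tmpDir, err := ioutil.TempDir("", "container-output")
	if err != nil {
		log.Fatalf("creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	// tcpdump arguments: unbuffered (-U), no DNS resolution (-n), and -w so the
	// raw capture lands in a pcap file rather than only being decoded to stdout.
	// The interface name is an assumption for illustration.
	snifferArgs := []string{
		"tcpdump", "-S", "-vvv", "-U", "-n",
		"-i", "eth1",
		"-w", tmpDir + "/dump.pcap",
	}
	fmt.Println("sniffer command:", snifferArgs)

	// On teardown the capture is copied into the directory Bazel preserves for
	// undeclared test outputs, if one is set.
	if out := os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR"); out != "" {
		if err := exec.Command("/bin/cp", "-r", tmpDir, out).Run(); err != nil {
			log.Printf("unable to copy output files: %v", err)
		}
	}
}
```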
259,992
28.05.2020 14:45:52
25,200
f7418e21590e271302a3c375323950c209ce5ced
Move Cleanup to its own package
[ { "change_type": "ADD", "old_path": null, "new_path": "pkg/cleanup/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"cleanup\",\n+ srcs = [\"cleanup.go\"],\n+ visibility = [\"//:sandbox\"],\n+ deps = [\n+ ],\n+)\n+\n+go_test(\n+ name = \"cleanup_test\",\n+ srcs = [\"cleanup_test.go\"],\n+ library = \":cleanup\",\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/cleanup/cleanup.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package cleanup provides utilities to clean \"stuff\" on defers.\n+package cleanup\n+\n+// Cleanup allows defers to be aborted when cleanup needs to happen\n+// conditionally. Usage:\n+// cu := cleanup.Make(func() { f.Close() })\n+// defer cu.Clean() // failure before release is called will close the file.\n+// ...\n+// cu.Add(func() { f2.Close() }) // Adds another cleanup function\n+// ...\n+// cu.Release() // on success, aborts closing the file.\n+// return f\n+type Cleanup struct {\n+ cleaners []func()\n+}\n+\n+// Make creates a new Cleanup object.\n+func Make(f func()) Cleanup {\n+ return Cleanup{cleaners: []func(){f}}\n+}\n+\n+// Add adds a new function to be called on Clean().\n+func (c *Cleanup) Add(f func()) {\n+ c.cleaners = append(c.cleaners, f)\n+}\n+\n+// Clean calls all cleanup functions in reverse order.\n+func (c *Cleanup) Clean() {\n+ clean(c.cleaners)\n+ c.cleaners = nil\n+}\n+\n+// Release releases the cleanup from its duties, i.e. cleanup functions are not\n+// called after this point. 
Returns a function that calls all registered\n+// functions in case the caller has use for them.\n+func (c *Cleanup) Release() func() {\n+ old := c.cleaners\n+ c.cleaners = nil\n+ return func() { clean(old) }\n+}\n+\n+func clean(cleaners []func()) {\n+ for i := len(cleaners) - 1; i >= 0; i-- {\n+ cleaners[i]()\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/cleanup/cleanup_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package cleanup\n+\n+import \"testing\"\n+\n+func testCleanupHelper(clean, cleanAdd *bool, release bool) func() {\n+ cu := Make(func() {\n+ *clean = true\n+ })\n+ cu.Add(func() {\n+ *cleanAdd = true\n+ })\n+ defer cu.Clean()\n+ if release {\n+ return cu.Release()\n+ }\n+ return nil\n+}\n+\n+func TestCleanup(t *testing.T) {\n+ clean := false\n+ cleanAdd := false\n+ testCleanupHelper(&clean, &cleanAdd, false)\n+ if !clean {\n+ t.Fatalf(\"cleanup function was not called.\")\n+ }\n+ if !cleanAdd {\n+ t.Fatalf(\"added cleanup function was not called.\")\n+ }\n+}\n+\n+func TestRelease(t *testing.T) {\n+ clean := false\n+ cleanAdd := false\n+ cleaner := testCleanupHelper(&clean, &cleanAdd, true)\n+\n+ // Check that clean was not called after release.\n+ if clean {\n+ t.Fatalf(\"cleanup function was called.\")\n+ }\n+ if cleanAdd {\n+ t.Fatalf(\"added cleanup function was called.\")\n+ }\n+\n+ // Call the cleaner function and check that both cleanup functions are called.\n+ cleaner()\n+ if !clean {\n+ t.Fatalf(\"cleanup function was not called.\")\n+ }\n+ if !cleanAdd {\n+ t.Fatalf(\"added cleanup function was not called.\")\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cgroup/BUILD", "new_path": "runsc/cgroup/BUILD", "diff": "@@ -7,8 +7,8 @@ go_library(\nsrcs = [\"cgroup.go\"],\nvisibility = [\"//:sandbox\"],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/log\",\n- \"//runsc/specutils\",\n\"@com_github_cenkalti_backoff//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n],\n" }, { "change_type": "MODIFY", "old_path": "runsc/cgroup/cgroup.go", "new_path": "runsc/cgroup/cgroup.go", "diff": "@@ -31,8 +31,8 @@ import (\n\"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/log\"\n- \"gvisor.dev/gvisor/runsc/specutils\"\n)\nconst (\n@@ -246,7 +246,7 @@ func (c *Cgroup) Install(res *specs.LinuxResources) error {\n// The Cleanup object cleans up partially created cgroups when an error occurs.\n// Errors occuring during cleanup itself are ignored.\n- clean := specutils.MakeCleanup(func() { _ = c.Uninstall() })\n+ clean := cleanup.Make(func() { _ = c.Uninstall() })\ndefer clean.Clean()\nfor key, cfg := range controllers {\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/BUILD", "new_path": "runsc/container/BUILD", "diff": "@@ -16,6 +16,7 @@ go_library(\n],\ndeps = [\n\"//pkg/abi/linux\",\n+ 
\"//pkg/cleanup\",\n\"//pkg/log\",\n\"//pkg/sentry/control\",\n\"//pkg/sentry/sighandling\",\n@@ -53,6 +54,7 @@ go_test(\ndeps = [\n\"//pkg/abi/linux\",\n\"//pkg/bits\",\n+ \"//pkg/cleanup\",\n\"//pkg/log\",\n\"//pkg/sentry/control\",\n\"//pkg/sentry/kernel\",\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container.go", "new_path": "runsc/container/container.go", "diff": "@@ -31,6 +31,7 @@ import (\n\"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/control\"\n\"gvisor.dev/gvisor/pkg/sentry/sighandling\"\n@@ -293,7 +294,7 @@ func New(conf *boot.Config, args Args) (*Container, error) {\n}\n// The Cleanup object cleans up partially created containers when an error\n// occurs. Any errors occurring during cleanup itself are ignored.\n- cu := specutils.MakeCleanup(func() { _ = c.Destroy() })\n+ cu := cleanup.Make(func() { _ = c.Destroy() })\ndefer cu.Clean()\n// Lock the container metadata file to prevent concurrent creations of\n@@ -402,7 +403,7 @@ func (c *Container) Start(conf *boot.Config) error {\nif err := c.Saver.lock(); err != nil {\nreturn err\n}\n- unlock := specutils.MakeCleanup(func() { c.Saver.unlock() })\n+ unlock := cleanup.Make(func() { c.Saver.unlock() })\ndefer unlock.Clean()\nif err := c.requireStatus(\"start\", Created); err != nil {\n@@ -506,7 +507,7 @@ func Run(conf *boot.Config, args Args) (syscall.WaitStatus, error) {\n}\n// Clean up partially created container if an error occurs.\n// Any errors returned by Destroy() itself are ignored.\n- cu := specutils.MakeCleanup(func() {\n+ cu := cleanup.Make(func() {\nc.Destroy()\n})\ndefer cu.Clean()\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -27,6 +27,7 @@ import (\n\"time\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/sentry/control\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sync\"\n@@ -64,29 +65,16 @@ func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*C\npanic(\"conf.RootDir not set. 
Call testutil.SetupRootDir() to set.\")\n}\n- var (\n- containers []*Container\n- cleanups []func()\n- )\n- cleanups = append(cleanups, func() {\n- for _, c := range containers {\n- c.Destroy()\n- }\n- })\n- cleanupAll := func() {\n- for _, c := range cleanups {\n- c()\n- }\n- }\n- localClean := specutils.MakeCleanup(cleanupAll)\n- defer localClean.Clean()\n+ cu := cleanup.Cleanup{}\n+ defer cu.Clean()\n+ var containers []*Container\nfor i, spec := range specs {\nbundleDir, cleanup, err := testutil.SetupBundleDir(spec)\nif err != nil {\nreturn nil, nil, fmt.Errorf(\"error setting up container: %v\", err)\n}\n- cleanups = append(cleanups, cleanup)\n+ cu.Add(cleanup)\nargs := Args{\nID: ids[i],\n@@ -97,6 +85,7 @@ func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*C\nif err != nil {\nreturn nil, nil, fmt.Errorf(\"error creating container: %v\", err)\n}\n+ cu.Add(func() { cont.Destroy() })\ncontainers = append(containers, cont)\nif err := cont.Start(conf); err != nil {\n@@ -104,8 +93,7 @@ func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*C\n}\n}\n- localClean.Release()\n- return containers, cleanupAll, nil\n+ return containers, cu.Release(), nil\n}\ntype execDesc struct {\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/BUILD", "new_path": "runsc/fsgofer/BUILD", "diff": "@@ -13,12 +13,12 @@ go_library(\nvisibility = [\"//runsc:__subpackages__\"],\ndeps = [\n\"//pkg/abi/linux\",\n+ \"//pkg/cleanup\",\n\"//pkg/fd\",\n\"//pkg/log\",\n\"//pkg/p9\",\n\"//pkg/sync\",\n\"//pkg/syserr\",\n- \"//runsc/specutils\",\n\"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -33,11 +33,11 @@ import (\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/fd\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/p9\"\n\"gvisor.dev/gvisor/pkg/sync\"\n- \"gvisor.dev/gvisor/runsc/specutils\"\n)\nconst (\n@@ -439,7 +439,7 @@ func (l *localFile) Create(name string, mode p9.OpenFlags, perm p9.FileMode, uid\nif err != nil {\nreturn nil, nil, p9.QID{}, 0, extractErrno(err)\n}\n- cu := specutils.MakeCleanup(func() {\n+ cu := cleanup.Make(func() {\nchild.Close()\n// Best effort attempt to remove the file in case of failure.\nif err := syscall.Unlinkat(l.file.FD(), name); err != nil {\n@@ -480,7 +480,7 @@ func (l *localFile) Mkdir(name string, perm p9.FileMode, uid p9.UID, gid p9.GID)\nif err := syscall.Mkdirat(l.file.FD(), name, uint32(perm.Permissions())); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- cu := specutils.MakeCleanup(func() {\n+ cu := cleanup.Make(func() {\n// Best effort attempt to remove the dir in case of failure.\nif err := unix.Unlinkat(l.file.FD(), name, unix.AT_REMOVEDIR); err != nil {\nlog.Warningf(\"error unlinking dir %q after failure: %v\", path.Join(l.hostPath, name), err)\n@@ -864,7 +864,7 @@ func (l *localFile) Symlink(target, newName string, uid p9.UID, gid p9.GID) (p9.\nif err := unix.Symlinkat(target, l.file.FD(), newName); err != nil {\nreturn p9.QID{}, extractErrno(err)\n}\n- cu := specutils.MakeCleanup(func() {\n+ cu := cleanup.Make(func() {\n// Best effort attempt to remove the symlink in case of failure.\nif err := syscall.Unlinkat(l.file.FD(), newName); err != nil {\nlog.Warningf(\"error unlinking file %q after failure: %v\", path.Join(l.hostPath, newName), err)\n" }, { "change_type": "MODIFY", "old_path": 
"runsc/sandbox/BUILD", "new_path": "runsc/sandbox/BUILD", "diff": "@@ -13,6 +13,7 @@ go_library(\n\"//runsc:__subpackages__\",\n],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/control/client\",\n\"//pkg/control/server\",\n\"//pkg/log\",\n" }, { "change_type": "MODIFY", "old_path": "runsc/sandbox/sandbox.go", "new_path": "runsc/sandbox/sandbox.go", "diff": "@@ -30,6 +30,7 @@ import (\n\"github.com/cenkalti/backoff\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"github.com/syndtr/gocapability/capability\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/control/client\"\n\"gvisor.dev/gvisor/pkg/control/server\"\n\"gvisor.dev/gvisor/pkg/log\"\n@@ -119,7 +120,7 @@ func New(conf *boot.Config, args *Args) (*Sandbox, error) {\ns := &Sandbox{ID: args.ID, Cgroup: args.Cgroup}\n// The Cleanup object cleans up partially created sandboxes when an error\n// occurs. Any errors occurring during cleanup itself are ignored.\n- c := specutils.MakeCleanup(func() {\n+ c := cleanup.Make(func() {\nerr := s.destroy()\nlog.Warningf(\"error destroying sandbox: %v\", err)\n})\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/specutils.go", "new_path": "runsc/specutils/specutils.go", "diff": "@@ -444,36 +444,6 @@ func ContainsStr(strs []string, str string) bool {\nreturn false\n}\n-// Cleanup allows defers to be aborted when cleanup needs to happen\n-// conditionally. Usage:\n-// c := MakeCleanup(func() { f.Close() })\n-// defer c.Clean() // any failure before release is called will close the file.\n-// ...\n-// c.Release() // on success, aborts closing the file and return it.\n-// return f\n-type Cleanup struct {\n- clean func()\n-}\n-\n-// MakeCleanup creates a new Cleanup object.\n-func MakeCleanup(f func()) Cleanup {\n- return Cleanup{clean: f}\n-}\n-\n-// Clean calls the cleanup function.\n-func (c *Cleanup) Clean() {\n- if c.clean != nil {\n- c.clean()\n- c.clean = nil\n- }\n-}\n-\n-// Release releases the cleanup from its duties, i.e. 
cleanup function is not\n-// called after this point.\n-func (c *Cleanup) Release() {\n- c.clean = nil\n-}\n-\n// RetryEintr retries the function until an error different than EINTR is\n// returned.\nfunc RetryEintr(f func() (uintptr, uintptr, error)) (uintptr, uintptr, error) {\n" }, { "change_type": "MODIFY", "old_path": "test/root/BUILD", "new_path": "test/root/BUILD", "diff": "@@ -33,6 +33,7 @@ go_test(\n],\nvisibility = [\"//:sandbox\"],\ndeps = [\n+ \"//pkg/cleanup\",\n\"//pkg/test/criutil\",\n\"//pkg/test/dockerutil\",\n\"//pkg/test/testutil\",\n" }, { "change_type": "MODIFY", "old_path": "test/root/crictl_test.go", "new_path": "test/root/crictl_test.go", "diff": "@@ -30,10 +30,10 @@ import (\n\"testing\"\n\"time\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/test/criutil\"\n\"gvisor.dev/gvisor/pkg/test/dockerutil\"\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n- \"gvisor.dev/gvisor/runsc/specutils\"\n)\n// Tests for crictl have to be run as root (rather than in a user namespace)\n@@ -272,27 +272,20 @@ disabled_plugins = [\"restart\"]\n// * Runs containerd and waits for it to reach a \"ready\" state for testing.\n// * Returns a cleanup function that should be called at the end of the test.\nfunc setup(t *testing.T) (*criutil.Crictl, func(), error) {\n- var cleanups []func()\n- cleanupFunc := func() {\n- for i := len(cleanups) - 1; i >= 0; i-- {\n- cleanups[i]()\n- }\n- }\n- cleanup := specutils.MakeCleanup(cleanupFunc)\n- defer cleanup.Clean()\n-\n// Create temporary containerd root and state directories, and a socket\n// via which crictl and containerd communicate.\ncontainerdRoot, err := ioutil.TempDir(testutil.TmpDir(), \"containerd-root\")\nif err != nil {\nt.Fatalf(\"failed to create containerd root: %v\", err)\n}\n- cleanups = append(cleanups, func() { os.RemoveAll(containerdRoot) })\n+ cu := cleanup.Make(func() { os.RemoveAll(containerdRoot) })\n+ defer cu.Clean()\n+\ncontainerdState, err := ioutil.TempDir(testutil.TmpDir(), \"containerd-state\")\nif err != nil {\nt.Fatalf(\"failed to create containerd state: %v\", err)\n}\n- cleanups = append(cleanups, func() { os.RemoveAll(containerdState) })\n+ cu.Add(func() { os.RemoveAll(containerdState) })\nsockAddr := filepath.Join(testutil.TmpDir(), \"containerd-test.sock\")\n// We rewrite a configuration. 
This is based on the current docker\n@@ -305,7 +298,7 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) {\nif err != nil {\nt.Fatalf(\"failed to write containerd config\")\n}\n- cleanups = append(cleanups, configCleanup)\n+ cu.Add(configCleanup)\n// Start containerd.\ncmd := exec.Command(getContainerd(),\n@@ -321,7 +314,8 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) {\nstdout := &bytes.Buffer{}\ncmd.Stderr = io.MultiWriter(startupW, stderr)\ncmd.Stdout = io.MultiWriter(startupW, stdout)\n- cleanups = append(cleanups, func() {\n+ cu.Add(func() {\n+ // Log output in case of failure.\nt.Logf(\"containerd stdout: %s\", stdout.String())\nt.Logf(\"containerd stderr: %s\", stderr.String())\n})\n@@ -338,15 +332,14 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) {\n// Kill must be the last cleanup (as it will be executed first).\ncc := criutil.NewCrictl(t, sockAddr)\n- cleanups = append(cleanups, func() {\n+ cu.Add(func() {\ncc.CleanUp() // Remove tmp files, etc.\nif err := testutil.KillCommand(cmd); err != nil {\nlog.Printf(\"error killing containerd: %v\", err)\n}\n})\n- cleanup.Release()\n- return cc, cleanupFunc, nil\n+ return cc, cu.Release(), nil\n}\n// httpGet GETs the contents of a file served from a pod on port 80.\n" }, { "change_type": "MODIFY", "old_path": "test/root/oom_score_adj_test.go", "new_path": "test/root/oom_score_adj_test.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"testing\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.dev/gvisor/pkg/cleanup\"\n\"gvisor.dev/gvisor/pkg/test/testutil\"\n\"gvisor.dev/gvisor/runsc/container\"\n\"gvisor.dev/gvisor/runsc/specutils\"\n@@ -324,40 +325,26 @@ func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {\n}\nfunc startContainers(t *testing.T, specs []*specs.Spec, ids []string) ([]*container.Container, func(), error) {\n- var (\n- containers []*container.Container\n- cleanups []func()\n- )\n- cleanups = append(cleanups, func() {\n- for _, c := range containers {\n- c.Destroy()\n- }\n- })\n- cleanupAll := func() {\n- for _, c := range cleanups {\n- c()\n- }\n- }\n- localClean := specutils.MakeCleanup(cleanupAll)\n- defer localClean.Clean()\n+ var containers []*container.Container\n// All containers must share the same root.\n- rootDir, cleanup, err := testutil.SetupRootDir()\n+ rootDir, clean, err := testutil.SetupRootDir()\nif err != nil {\nt.Fatalf(\"error creating root dir: %v\", err)\n}\n- cleanups = append(cleanups, cleanup)\n+ cu := cleanup.Make(clean)\n+ defer cu.Clean()\n// Point this to from the configuration.\nconf := testutil.TestConfig(t)\nconf.RootDir = rootDir\nfor i, spec := range specs {\n- bundleDir, cleanup, err := testutil.SetupBundleDir(spec)\n+ bundleDir, clean, err := testutil.SetupBundleDir(spec)\nif err != nil {\nreturn nil, nil, fmt.Errorf(\"error setting up bundle: %v\", err)\n}\n- cleanups = append(cleanups, cleanup)\n+ cu.Add(clean)\nargs := container.Args{\nID: ids[i],\n@@ -375,6 +362,5 @@ func startContainers(t *testing.T, specs []*specs.Spec, ids []string) ([]*contai\n}\n}\n- localClean.Release()\n- return containers, cleanupAll, nil\n+ return containers, cu.Release(), nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Move Cleanup to its own package PiperOrigin-RevId: 313663382
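The new pkg/cleanup package in this record replaces specutils.MakeCleanup with a Cleanup that can hold multiple functions and hand them back via Release(). A small usage sketch following the doc comment in the diff (the file paths and the helper's name are hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"gvisor.dev/gvisor/pkg/cleanup"
)

// openBoth opens two files; on any failure the deferred cu.Clean() closes
// whatever was already opened, in reverse order. On success, Release() aborts
// the deferred cleanup and returns a function the caller can run later.
func openBoth(path1, path2 string) (*os.File, *os.File, func(), error) {
	f1, err := os.Open(path1)
	if err != nil {
		return nil, nil, nil, err
	}
	cu := cleanup.Make(func() { f1.Close() })
	defer cu.Clean() // runs unless Release() is called below

	f2, err := os.Open(path2)
	if err != nil {
		return nil, nil, nil, err // cu.Clean() closes f1
	}
	cu.Add(func() { f2.Close() })

	return f1, f2, cu.Release(), nil
}

func main() {
	_, _, closeAll, err := openBoth("/etc/hostname", "/etc/hosts")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer closeAll()
	fmt.Println("both files opened")
}
```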
259,891
28.05.2020 16:44:15
25,200
c55b84e16aeb4481106661e3877c50edbf281762
Enable iptables source filtering (-s/--source)
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/netfilter.go", "new_path": "pkg/sentry/socket/netfilter/netfilter.go", "diff": "@@ -64,6 +64,8 @@ const enableLogging = false\nvar emptyFilter = stack.IPHeaderFilter{\nDst: \"\\x00\\x00\\x00\\x00\",\nDstMask: \"\\x00\\x00\\x00\\x00\",\n+ Src: \"\\x00\\x00\\x00\\x00\",\n+ SrcMask: \"\\x00\\x00\\x00\\x00\",\n}\n// nflog logs messages related to the writing and reading of iptables.\n@@ -214,11 +216,16 @@ func convertNetstackToBinary(tablename string, table stack.Table) (linux.KernelI\n}\ncopy(entry.IPTEntry.IP.Dst[:], rule.Filter.Dst)\ncopy(entry.IPTEntry.IP.DstMask[:], rule.Filter.DstMask)\n+ copy(entry.IPTEntry.IP.Src[:], rule.Filter.Src)\n+ copy(entry.IPTEntry.IP.SrcMask[:], rule.Filter.SrcMask)\ncopy(entry.IPTEntry.IP.OutputInterface[:], rule.Filter.OutputInterface)\ncopy(entry.IPTEntry.IP.OutputInterfaceMask[:], rule.Filter.OutputInterfaceMask)\nif rule.Filter.DstInvert {\nentry.IPTEntry.IP.InverseFlags |= linux.IPT_INV_DSTIP\n}\n+ if rule.Filter.SrcInvert {\n+ entry.IPTEntry.IP.InverseFlags |= linux.IPT_INV_SRCIP\n+ }\nif rule.Filter.OutputInterfaceInvert {\nentry.IPTEntry.IP.InverseFlags |= linux.IPT_INV_VIA_OUT\n}\n@@ -737,6 +744,9 @@ func filterFromIPTIP(iptip linux.IPTIP) (stack.IPHeaderFilter, error) {\nif len(iptip.Dst) != header.IPv4AddressSize || len(iptip.DstMask) != header.IPv4AddressSize {\nreturn stack.IPHeaderFilter{}, fmt.Errorf(\"incorrect length of destination (%d) and/or destination mask (%d) fields\", len(iptip.Dst), len(iptip.DstMask))\n}\n+ if len(iptip.Src) != header.IPv4AddressSize || len(iptip.SrcMask) != header.IPv4AddressSize {\n+ return stack.IPHeaderFilter{}, fmt.Errorf(\"incorrect length of source (%d) and/or source mask (%d) fields\", len(iptip.Src), len(iptip.SrcMask))\n+ }\nn := bytes.IndexByte([]byte(iptip.OutputInterface[:]), 0)\nif n == -1 {\n@@ -755,6 +765,9 @@ func filterFromIPTIP(iptip linux.IPTIP) (stack.IPHeaderFilter, error) {\nDst: tcpip.Address(iptip.Dst[:]),\nDstMask: tcpip.Address(iptip.DstMask[:]),\nDstInvert: iptip.InverseFlags&linux.IPT_INV_DSTIP != 0,\n+ Src: tcpip.Address(iptip.Src[:]),\n+ SrcMask: tcpip.Address(iptip.SrcMask[:]),\n+ SrcInvert: iptip.InverseFlags&linux.IPT_INV_SRCIP != 0,\nOutputInterface: ifname,\nOutputInterfaceMask: ifnameMask,\nOutputInterfaceInvert: iptip.InverseFlags&linux.IPT_INV_VIA_OUT != 0,\n@@ -765,15 +778,13 @@ func containsUnsupportedFields(iptip linux.IPTIP) bool {\n// The following features are supported:\n// - Protocol\n// - Dst and DstMask\n+ // - Src and SrcMask\n// - The inverse destination IP check flag\n// - OutputInterface, OutputInterfaceMask and its inverse.\n- var emptyInetAddr = linux.InetAddr{}\nvar emptyInterface = [linux.IFNAMSIZ]byte{}\n// Disable any supported inverse flags.\n- inverseMask := uint8(linux.IPT_INV_DSTIP) | uint8(linux.IPT_INV_VIA_OUT)\n- return iptip.Src != emptyInetAddr ||\n- iptip.SrcMask != emptyInetAddr ||\n- iptip.InputInterface != emptyInterface ||\n+ inverseMask := uint8(linux.IPT_INV_DSTIP) | uint8(linux.IPT_INV_SRCIP) | uint8(linux.IPT_INV_VIA_OUT)\n+ return iptip.InputInterface != emptyInterface ||\niptip.InputInterfaceMask != emptyInterface ||\niptip.Flags != 0 ||\niptip.InverseFlags&^inverseMask != 0\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables.go", "new_path": "pkg/tcpip/stack/iptables.go", "diff": "@@ -16,7 +16,6 @@ package stack\nimport (\n\"fmt\"\n- \"strings\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -314,7 +313,7 @@ func (it 
*IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx\n}\n// Check whether the packet matches the IP header filter.\n- if !filterMatch(rule.Filter, header.IPv4(pkt.NetworkHeader), hook, nicName) {\n+ if !rule.Filter.match(header.IPv4(pkt.NetworkHeader), hook, nicName) {\n// Continue on to the next rule.\nreturn RuleJump, ruleIdx + 1\n}\n@@ -335,47 +334,3 @@ func (it *IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx\n// All the matchers matched, so run the target.\nreturn rule.Target.Action(pkt, &it.connections, hook, gso, r, address)\n}\n-\n-func filterMatch(filter IPHeaderFilter, hdr header.IPv4, hook Hook, nicName string) bool {\n- // TODO(gvisor.dev/issue/170): Support other fields of the filter.\n- // Check the transport protocol.\n- if filter.Protocol != 0 && filter.Protocol != hdr.TransportProtocol() {\n- return false\n- }\n-\n- // Check the destination IP.\n- dest := hdr.DestinationAddress()\n- matches := true\n- for i := range filter.Dst {\n- if dest[i]&filter.DstMask[i] != filter.Dst[i] {\n- matches = false\n- break\n- }\n- }\n- if matches == filter.DstInvert {\n- return false\n- }\n-\n- // Check the output interface.\n- // TODO(gvisor.dev/issue/170): Add the check for FORWARD and POSTROUTING\n- // hooks after supported.\n- if hook == Output {\n- n := len(filter.OutputInterface)\n- if n == 0 {\n- return true\n- }\n-\n- // If the interface name ends with '+', any interface which begins\n- // with the name should be matched.\n- ifName := filter.OutputInterface\n- matches = true\n- if strings.HasSuffix(ifName, \"+\") {\n- matches = strings.HasPrefix(nicName, ifName[:n-1])\n- } else {\n- matches = nicName == ifName\n- }\n- return filter.OutputInterfaceInvert != matches\n- }\n-\n- return true\n-}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables_types.go", "new_path": "pkg/tcpip/stack/iptables_types.go", "diff": "package stack\nimport (\n+ \"strings\"\n+\n\"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n)\n// A Hook specifies one of the hooks built into the network stack.\n@@ -159,6 +162,16 @@ type IPHeaderFilter struct {\n// comparison.\nDstInvert bool\n+ // Src matches the source IP address.\n+ Src tcpip.Address\n+\n+ // SrcMask masks bits of the source IP address when comparing with Src.\n+ SrcMask tcpip.Address\n+\n+ // SrcInvert inverts the meaning of the source IP check, i.e. 
when true the\n+ // filter will match packets that fail the source comparison.\n+ SrcInvert bool\n+\n// OutputInterface matches the name of the outgoing interface for the\n// packet.\nOutputInterface string\n@@ -173,6 +186,55 @@ type IPHeaderFilter struct {\nOutputInterfaceInvert bool\n}\n+// match returns whether hdr matches the filter.\n+func (fl IPHeaderFilter) match(hdr header.IPv4, hook Hook, nicName string) bool {\n+ // TODO(gvisor.dev/issue/170): Support other fields of the filter.\n+ // Check the transport protocol.\n+ if fl.Protocol != 0 && fl.Protocol != hdr.TransportProtocol() {\n+ return false\n+ }\n+\n+ // Check the source and destination IPs.\n+ if !filterAddress(hdr.DestinationAddress(), fl.DstMask, fl.Dst, fl.DstInvert) || !filterAddress(hdr.SourceAddress(), fl.SrcMask, fl.Src, fl.SrcInvert) {\n+ return false\n+ }\n+\n+ // Check the output interface.\n+ // TODO(gvisor.dev/issue/170): Add the check for FORWARD and POSTROUTING\n+ // hooks after supported.\n+ if hook == Output {\n+ n := len(fl.OutputInterface)\n+ if n == 0 {\n+ return true\n+ }\n+\n+ // If the interface name ends with '+', any interface which begins\n+ // with the name should be matched.\n+ ifName := fl.OutputInterface\n+ matches := true\n+ if strings.HasSuffix(ifName, \"+\") {\n+ matches = strings.HasPrefix(nicName, ifName[:n-1])\n+ } else {\n+ matches = nicName == ifName\n+ }\n+ return fl.OutputInterfaceInvert != matches\n+ }\n+\n+ return true\n+}\n+\n+// filterAddress returns whether addr matches the filter.\n+func filterAddress(addr, mask, filterAddr tcpip.Address, invert bool) bool {\n+ matches := true\n+ for i := range filterAddr {\n+ if addr[i]&mask[i] != filterAddr[i] {\n+ matches = false\n+ break\n+ }\n+ }\n+ return matches != invert\n+}\n+\n// A Matcher is the interface for matching packets.\ntype Matcher interface {\n// Name returns the name of the Matcher.\n" }, { "change_type": "MODIFY", "old_path": "test/iptables/filter_input.go", "new_path": "test/iptables/filter_input.go", "diff": "@@ -49,6 +49,8 @@ func init() {\nRegisterTestCase(FilterInputJumpTwice{})\nRegisterTestCase(FilterInputDestination{})\nRegisterTestCase(FilterInputInvertDestination{})\n+ RegisterTestCase(FilterInputSource{})\n+ RegisterTestCase(FilterInputInvertSource{})\n}\n// FilterInputDropUDP tests that we can drop UDP traffic.\n@@ -667,3 +669,61 @@ func (FilterInputInvertDestination) ContainerAction(ip net.IP) error {\nfunc (FilterInputInvertDestination) LocalAction(ip net.IP) error {\nreturn sendUDPLoop(ip, acceptPort, sendloopDuration)\n}\n+\n+// FilterInputSource verifies that we can filter packets via `-d\n+// <ipaddr>`.\n+type FilterInputSource struct{}\n+\n+// Name implements TestCase.Name.\n+func (FilterInputSource) Name() string {\n+ return \"FilterInputSource\"\n+}\n+\n+// ContainerAction implements TestCase.ContainerAction.\n+func (FilterInputSource) ContainerAction(ip net.IP) error {\n+ // Make INPUT's default action DROP, then ACCEPT all packets from this\n+ // machine.\n+ rules := [][]string{\n+ {\"-P\", \"INPUT\", \"DROP\"},\n+ {\"-A\", \"INPUT\", \"-s\", fmt.Sprintf(\"%v\", ip), \"-j\", \"ACCEPT\"},\n+ }\n+ if err := filterTableRules(rules); err != nil {\n+ return err\n+ }\n+\n+ return listenUDP(acceptPort, sendloopDuration)\n+}\n+\n+// LocalAction implements TestCase.LocalAction.\n+func (FilterInputSource) LocalAction(ip net.IP) error {\n+ return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+}\n+\n+// FilterInputInvertSource verifies that we can filter packets via `! 
-d\n+// <ipaddr>`.\n+type FilterInputInvertSource struct{}\n+\n+// Name implements TestCase.Name.\n+func (FilterInputInvertSource) Name() string {\n+ return \"FilterInputInvertSource\"\n+}\n+\n+// ContainerAction implements TestCase.ContainerAction.\n+func (FilterInputInvertSource) ContainerAction(ip net.IP) error {\n+ // Make INPUT's default action DROP, then ACCEPT all packets not bound\n+ // for 127.0.0.1.\n+ rules := [][]string{\n+ {\"-P\", \"INPUT\", \"DROP\"},\n+ {\"-A\", \"INPUT\", \"!\", \"-s\", localIP, \"-j\", \"ACCEPT\"},\n+ }\n+ if err := filterTableRules(rules); err != nil {\n+ return err\n+ }\n+\n+ return listenUDP(acceptPort, sendloopDuration)\n+}\n+\n+// LocalAction implements TestCase.LocalAction.\n+func (FilterInputInvertSource) LocalAction(ip net.IP) error {\n+ return sendUDPLoop(ip, acceptPort, sendloopDuration)\n+}\n" }, { "change_type": "MODIFY", "old_path": "test/iptables/iptables_test.go", "new_path": "test/iptables/iptables_test.go", "diff": "@@ -302,3 +302,11 @@ func TestNATPreRedirectInvert(t *testing.T) {\nfunc TestNATRedirectRequiresProtocol(t *testing.T) {\nsingleTest(t, NATRedirectRequiresProtocol{})\n}\n+\n+func TestInputSource(t *testing.T) {\n+ singleTest(t, FilterInputSource{})\n+}\n+\n+func TestInputInvertSource(t *testing.T) {\n+ singleTest(t, FilterInputInvertSource{})\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Enable iptables source filtering (-s/--source)
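The core of this record's change is filterAddress, which generalizes the destination-IP check so both -d and -s compare a masked address and honor an invert flag. A standalone sketch of that masked comparison, using plain byte slices for illustration instead of gVisor's tcpip.Address:

```go
package main

import "fmt"

// filterAddress reports whether addr matches filterAddr under mask, with the
// result optionally inverted -- the same logic the diff factors out of the
// destination check so it can also serve -s/--source.
func filterAddress(addr, mask, filterAddr []byte, invert bool) bool {
	matches := true
	for i := range filterAddr {
		if addr[i]&mask[i] != filterAddr[i] {
			matches = false
			break
		}
	}
	return matches != invert
}

func main() {
	src := []byte{192, 168, 1, 42}
	mask := []byte{255, 255, 255, 0}
	want := []byte{192, 168, 1, 0}

	fmt.Println(filterAddress(src, mask, want, false)) // true: 192.168.1.42 is in 192.168.1.0/24
	fmt.Println(filterAddress(src, mask, want, true))  // false: inverted, like "! -s 192.168.1.0/24"
}
```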
259,860
29.05.2020 11:27:00
25,200
9ada8c972e43e0c3e144b432fe57d95f823a1847
Fix the smallest of typos.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "new_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "diff": "@@ -239,7 +239,7 @@ func (d *Dentry) Watches() *vfs.Watches {\n// InsertChild inserts child into the vfs dentry cache with the given name under\n// this dentry. This does not update the directory inode, so calling this on\n-// it's own isn't sufficient to insert a child into a directory. InsertChild\n+// its own isn't sufficient to insert a child into a directory. InsertChild\n// updates the link count on d if required.\n//\n// Precondition: d must represent a directory inode.\n" } ]
Go
Apache License 2.0
google/gvisor
Fix the smallest of typos. PiperOrigin-RevId: 313817646
259,860
29.05.2020 12:27:15
25,200
ccf69bdd7e05a4e5f404fbef89a7f49f218645e2
Implement IN_EXCL_UNLINK inotify option in vfs2. Limited to tmpfs. Inotify support in other filesystem implementations to follow. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/dentry.go", "new_path": "pkg/sentry/fsimpl/ext/dentry.go", "diff": "@@ -64,7 +64,7 @@ func (d *dentry) DecRef() {\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\n//\n// TODO(gvisor.dev/issue/1479): Implement inotify.\n-func (d *dentry) InotifyWithParent(events uint32, cookie uint32) {}\n+func (d *dentry) InotifyWithParent(events uint32, cookie uint32, et vfs.EventType) {}\n// Watches implements vfs.DentryImpl.Watches.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1042,7 +1042,7 @@ func (d *dentry) decRefLocked() {\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\n//\n// TODO(gvisor.dev/issue/1479): Implement inotify.\n-func (d *dentry) InotifyWithParent(events uint32, cookie uint32) {}\n+func (d *dentry) InotifyWithParent(events uint32, cookie uint32, et vfs.EventType) {}\n// Watches implements vfs.DentryImpl.Watches.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "new_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "diff": "@@ -228,7 +228,7 @@ func (d *Dentry) destroy() {\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\n//\n// TODO(gvisor.dev/issue/1479): Implement inotify.\n-func (d *Dentry) InotifyWithParent(events uint32, cookie uint32) {}\n+func (d *Dentry) InotifyWithParent(events uint32, cookie uint32, et vfs.EventType) {}\n// Watches implements vfs.DentryImpl.Watches.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/directory.go", "new_path": "pkg/sentry/fsimpl/tmpfs/directory.go", "diff": "@@ -79,6 +79,7 @@ func (dir *directory) removeChildLocked(child *dentry) {\ndir.iterMu.Lock()\ndir.childList.Remove(child)\ndir.iterMu.Unlock()\n+ child.unlinked = true\n}\ntype directoryFD struct {\n@@ -112,7 +113,7 @@ func (fd *directoryFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallba\ndir.iterMu.Lock()\ndefer dir.iterMu.Unlock()\n- fd.dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ fd.dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\nfd.inode().touchAtime(fd.vfsfd.Mount())\nif fd.off == 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -182,7 +182,7 @@ func (fs *filesystem) doCreateAt(rp *vfs.ResolvingPath, dir bool, create func(pa\nif dir {\nev |= linux.IN_ISDIR\n}\n- parentDir.inode.watches.Notify(name, uint32(ev), 0)\n+ parentDir.inode.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent)\nparentDir.inode.touchCMtime()\nreturn nil\n}\n@@ -247,7 +247,7 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\nreturn syserror.EMLINK\n}\nd.inode.incLinksLocked()\n- d.inode.watches.Notify(\"\", linux.IN_ATTRIB, 0)\n+ d.inode.watches.Notify(\"\", linux.IN_ATTRIB, 0, vfs.InodeEvent)\nparentDir.insertChildLocked(fs.newDentry(d.inode), name)\nreturn nil\n})\n@@ -361,7 +361,7 @@ afterTrailingSymlink:\nif err != nil {\nreturn nil, err\n}\n- parentDir.inode.watches.Notify(name, linux.IN_CREATE, 0)\n+ parentDir.inode.watches.Notify(name, linux.IN_CREATE, 0, vfs.PathEvent)\nparentDir.inode.touchCMtime()\nreturn fd, nil\n}\n@@ -613,7 +613,7 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error\nreturn err\n}\nparentDir.removeChildLocked(child)\n- parentDir.inode.watches.Notify(name, linux.IN_DELETE|linux.IN_ISDIR, 0)\n+ 
parentDir.inode.watches.Notify(name, linux.IN_DELETE|linux.IN_ISDIR, 0, vfs.InodeEvent)\n// Remove links for child, child/., and child/..\nchild.inode.decLinksLocked()\nchild.inode.decLinksLocked()\n@@ -636,7 +636,7 @@ func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts\n}\nif ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {\n- d.InotifyWithParent(ev, 0)\n+ d.InotifyWithParent(ev, 0, vfs.InodeEvent)\n}\nreturn nil\n}\n@@ -784,7 +784,7 @@ func (fs *filesystem) SetxattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt\nreturn err\n}\n- d.InotifyWithParent(linux.IN_ATTRIB, 0)\n+ d.InotifyWithParent(linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n}\n@@ -800,7 +800,7 @@ func (fs *filesystem) RemovexattrAt(ctx context.Context, rp *vfs.ResolvingPath,\nreturn err\n}\n- d.InotifyWithParent(linux.IN_ATTRIB, 0)\n+ d.InotifyWithParent(linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -163,6 +163,11 @@ type dentry struct {\n// filesystem.mu.\nname string\n+ // unlinked indicates whether this dentry has been unlinked from its parent.\n+ // It is only set to true on an unlink operation, and never set from true to\n+ // false. unlinked is protected by filesystem.mu.\n+ unlinked bool\n+\n// dentryEntry (ugh) links dentries into their parent directory.childList.\ndentryEntry\n@@ -202,7 +207,7 @@ func (d *dentry) DecRef() {\n}\n// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.\n-func (d *dentry) InotifyWithParent(events uint32, cookie uint32) {\n+func (d *dentry) InotifyWithParent(events uint32, cookie uint32, et vfs.EventType) {\nif d.inode.isDir() {\nevents |= linux.IN_ISDIR\n}\n@@ -211,9 +216,9 @@ func (d *dentry) InotifyWithParent(events uint32, cookie uint32) {\nif d.parent != nil {\n// Note that d.parent or d.name may be stale if there is a concurrent\n// rename operation. 
Inotify does not provide consistency guarantees.\n- d.parent.inode.watches.Notify(d.name, events, cookie)\n+ d.parent.inode.watches.NotifyWithExclusions(d.name, events, cookie, et, d.unlinked)\n}\n- d.inode.watches.Notify(\"\", events, cookie)\n+ d.inode.watches.Notify(\"\", events, cookie, et)\n}\n// Watches implements vfs.DentryImpl.Watches.\n@@ -676,9 +681,8 @@ func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions)\nreturn err\n}\n- // Generate inotify events.\nif ev := vfs.InotifyEventFromStatMask(opts.Stat.Mask); ev != 0 {\n- d.InotifyWithParent(ev, 0)\n+ d.InotifyWithParent(ev, 0, vfs.InodeEvent)\n}\nreturn nil\n}\n@@ -701,7 +705,7 @@ func (fd *fileDescription) Setxattr(ctx context.Context, opts vfs.SetxattrOption\n}\n// Generate inotify events.\n- d.InotifyWithParent(linux.IN_ATTRIB, 0)\n+ d.InotifyWithParent(linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n}\n@@ -713,7 +717,7 @@ func (fd *fileDescription) Removexattr(ctx context.Context, name string) error {\n}\n// Generate inotify events.\n- d.InotifyWithParent(linux.IN_ATTRIB, 0)\n+ d.InotifyWithParent(linux.IN_ATTRIB, 0, vfs.InodeEvent)\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_table.go", "new_path": "pkg/sentry/kernel/fd_table.go", "diff": "@@ -158,7 +158,7 @@ func (f *FDTable) dropVFS2(file *vfs.FileDescription) {\nif file.IsWritable() {\nev = linux.IN_CLOSE_WRITE\n}\n- file.Dentry().InotifyWithParent(ev, 0)\n+ file.Dentry().InotifyWithParent(ev, 0, vfs.PathEvent)\n// Drop the table reference.\nfile.DecRef()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/read_write.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/read_write.go", "diff": "@@ -94,7 +94,7 @@ func read(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, opt\nn, err := file.Read(t, dst, opts)\nif err != syserror.ErrWouldBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -102,7 +102,7 @@ func read(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, opt\nallowBlock, deadline, hasDeadline := blockPolicy(t, file)\nif !allowBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -135,7 +135,7 @@ func read(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, opt\nfile.EventUnregister(&w)\nif total > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn total, err\n}\n@@ -258,7 +258,7 @@ func pread(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, of\nn, err := file.PRead(t, dst, offset, opts)\nif err != syserror.ErrWouldBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -266,7 +266,7 @@ func pread(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, of\nallowBlock, deadline, hasDeadline := blockPolicy(t, file)\nif !allowBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -299,7 +299,7 @@ func pread(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, of\nfile.EventUnregister(&w)\nif total > 0 {\n- 
file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn total, err\n}\n@@ -364,7 +364,7 @@ func write(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, op\nn, err := file.Write(t, src, opts)\nif err != syserror.ErrWouldBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -372,7 +372,7 @@ func write(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, op\nallowBlock, deadline, hasDeadline := blockPolicy(t, file)\nif !allowBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -405,7 +405,7 @@ func write(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, op\nfile.EventUnregister(&w)\nif total > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0, vfs.PathEvent)\n}\nreturn total, err\n}\n@@ -528,7 +528,7 @@ func pwrite(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, o\nn, err := file.PWrite(t, src, offset, opts)\nif err != syserror.ErrWouldBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_MODIFY, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -536,7 +536,7 @@ func pwrite(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, o\nallowBlock, deadline, hasDeadline := blockPolicy(t, file)\nif !allowBlock {\nif n > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn n, err\n}\n@@ -569,7 +569,7 @@ func pwrite(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, o\nfile.EventUnregister(&w)\nif total > 0 {\n- file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0)\n+ file.Dentry().InotifyWithParent(linux.IN_ACCESS, 0, vfs.PathEvent)\n}\nreturn total, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/anonfs.go", "new_path": "pkg/sentry/vfs/anonfs.go", "diff": "@@ -301,7 +301,7 @@ func (d *anonDentry) DecRef() {\n// InotifyWithParent implements DentryImpl.InotifyWithParent.\n//\n// TODO(gvisor.dev/issue/1479): Implement inotify.\n-func (d *anonDentry) InotifyWithParent(events uint32, cookie uint32) {}\n+func (d *anonDentry) InotifyWithParent(events uint32, cookie uint32, et EventType) {}\n// Watches implements DentryImpl.Watches.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/dentry.go", "new_path": "pkg/sentry/vfs/dentry.go", "diff": "@@ -113,7 +113,7 @@ type DentryImpl interface {\n//\n// Note that the events may not actually propagate up to the user, depending\n// on the event masks.\n- InotifyWithParent(events uint32, cookie uint32)\n+ InotifyWithParent(events uint32, cookie uint32, et EventType)\n// Watches returns the set of inotify watches for the file corresponding to\n// the Dentry. 
Dentries that are hard links to the same underlying file\n@@ -151,8 +151,8 @@ func (d *Dentry) isMounted() bool {\n// InotifyWithParent notifies all watches on the inodes for this dentry and\n// its parent of events.\n-func (d *Dentry) InotifyWithParent(events uint32, cookie uint32) {\n- d.impl.InotifyWithParent(events, cookie)\n+func (d *Dentry) InotifyWithParent(events uint32, cookie uint32, et EventType) {\n+ d.impl.InotifyWithParent(events, cookie, et)\n}\n// Watches returns the set of inotify watches associated with d.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/inotify.go", "new_path": "pkg/sentry/vfs/inotify.go", "diff": "@@ -33,6 +33,19 @@ import (\n// must be a power 2 for rounding below.\nconst inotifyEventBaseSize = 16\n+// EventType defines different kinds of inotfiy events.\n+//\n+// The way events are labelled appears somewhat arbitrary, but they must match\n+// Linux so that IN_EXCL_UNLINK behaves as it does in Linux.\n+type EventType uint8\n+\n+// PathEvent and InodeEvent correspond to FSNOTIFY_EVENT_PATH and\n+// FSNOTIFY_EVENT_INODE in Linux.\n+const (\n+ PathEvent EventType = iota\n+ InodeEvent EventType = iota\n+)\n+\n// Inotify represents an inotify instance created by inotify_init(2) or\n// inotify_init1(2). Inotify implements FileDescriptionImpl.\n//\n@@ -419,13 +432,22 @@ func (w *Watches) Remove(id uint64) {\n}\n// Notify queues a new event with all watches in this set.\n-func (w *Watches) Notify(name string, events, cookie uint32) {\n+func (w *Watches) Notify(name string, events, cookie uint32, et EventType) {\n+ w.NotifyWithExclusions(name, events, cookie, et, false)\n+}\n+\n+// NotifyWithExclusions queues a new event with watches in this set. Watches\n+// with IN_EXCL_UNLINK are skipped if the event is coming from a child that\n+// has been unlinked.\n+func (w *Watches) NotifyWithExclusions(name string, events, cookie uint32, et EventType, unlinked bool) {\n// N.B. We don't defer the unlocks because Notify is in the hot path of\n// all IO operations, and the defer costs too much for small IO\n// operations.\nw.mu.RLock()\nfor _, watch := range w.ws {\n- // TODO(gvisor.dev/issue/1479): Skip for IN_EXCL_UNLINK cases.\n+ if unlinked && watch.ExcludeUnlinkedChildren() && et == PathEvent {\n+ continue\n+ }\nwatch.Notify(name, events, cookie)\n}\nw.mu.RUnlock()\n@@ -434,7 +456,7 @@ func (w *Watches) Notify(name string, events, cookie uint32) {\n// HandleDeletion is called when the watch target is destroyed to emit\n// the appropriate events.\nfunc (w *Watches) HandleDeletion() {\n- w.Notify(\"\", linux.IN_DELETE_SELF, 0)\n+ w.Notify(\"\", linux.IN_DELETE_SELF, 0, InodeEvent)\n// TODO(gvisor.dev/issue/1479): This doesn't work because maps are not copied\n// by value. 
Ideally, we wouldn't have this circular locking so we can just\n@@ -655,8 +677,8 @@ func InotifyEventFromStatMask(mask uint32) uint32 {\n// InotifyRemoveChild sends the appriopriate notifications to the watch sets of\n// the child being removed and its parent.\nfunc InotifyRemoveChild(self, parent *Watches, name string) {\n- self.Notify(\"\", linux.IN_ATTRIB, 0)\n- parent.Notify(name, linux.IN_DELETE, 0)\n+ self.Notify(\"\", linux.IN_ATTRIB, 0, InodeEvent)\n+ parent.Notify(name, linux.IN_DELETE, 0, InodeEvent)\n// TODO(gvisor.dev/issue/1479): implement IN_EXCL_UNLINK.\n}\n@@ -668,8 +690,8 @@ func InotifyRename(ctx context.Context, renamed, oldParent, newParent *Watches,\ndirEv = linux.IN_ISDIR\n}\ncookie := uniqueid.InotifyCookie(ctx)\n- oldParent.Notify(oldName, dirEv|linux.IN_MOVED_FROM, cookie)\n- newParent.Notify(newName, dirEv|linux.IN_MOVED_TO, cookie)\n+ oldParent.Notify(oldName, dirEv|linux.IN_MOVED_FROM, cookie, InodeEvent)\n+ newParent.Notify(newName, dirEv|linux.IN_MOVED_TO, cookie, InodeEvent)\n// Somewhat surprisingly, self move events do not have a cookie.\n- renamed.Notify(\"\", linux.IN_MOVE_SELF, 0)\n+ renamed.Notify(\"\", linux.IN_MOVE_SELF, 0, InodeEvent)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/vfs.go", "new_path": "pkg/sentry/vfs/vfs.go", "diff": "@@ -422,7 +422,7 @@ func (vfs *VirtualFilesystem) OpenAt(ctx context.Context, creds *auth.Credential\n}\n}\n- fd.Dentry().InotifyWithParent(linux.IN_OPEN, 0)\n+ fd.Dentry().InotifyWithParent(linux.IN_OPEN, 0, PathEvent)\nreturn fd, nil\n}\nif !rp.handleError(err) {\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/inotify.cc", "new_path": "test/syscalls/linux/inotify.cc", "diff": "@@ -1778,6 +1778,170 @@ TEST(Inotify, IncludeUnlinkedFile_NoRandomSave) {\n}));\n}\n+// Watches created with IN_EXCL_UNLINK will stop emitting events on fds for\n+// children that have already been unlinked.\n+//\n+// We need to disable S/R because there are filesystems where we cannot re-open\n+// fds to an unlinked file across S/R, e.g. gofer-backed filesytems.\n+TEST(Inotify, ExcludeUnlink_NoRandomSave) {\n+ const DisableSave ds;\n+ // TODO(gvisor.dev/issue/1624): This test fails on VFS1.\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const TempPath file =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\n+\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+ const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+\n+ // Unlink the child, which should cause further operations on the open file\n+ // descriptor to be ignored.\n+ ASSERT_THAT(unlink(file.path().c_str()), SyscallSucceeds());\n+ int val = 0;\n+ ASSERT_THAT(write(fd.get(), &val, sizeof(val)), SyscallSucceeds());\n+ ASSERT_THAT(read(fd.get(), &val, sizeof(val)), SyscallSucceeds());\n+ const std::vector<Event> events =\n+ ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({Event(IN_DELETE, wd, Basename(file.path()))}));\n+}\n+\n+// We need to disable S/R because there are filesystems where we cannot re-open\n+// fds to an unlinked file across S/R, e.g. 
gofer-backed filesytems.\n+TEST(Inotify, ExcludeUnlinkDirectory_NoRandomSave) {\n+ const DisableSave ds;\n+\n+ const TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ TempPath dir =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDirIn(parent.path()));\n+ std::string dirPath = dir.path();\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(dirPath.c_str(), O_RDONLY | O_DIRECTORY));\n+ const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), parent.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+\n+ // Unlink the dir, and then close the open fd.\n+ ASSERT_THAT(rmdir(dirPath.c_str()), SyscallSucceeds());\n+ dir.reset();\n+\n+ const std::vector<Event> events =\n+ ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ // No close event should appear.\n+ ASSERT_THAT(events,\n+ Are({Event(IN_DELETE | IN_ISDIR, wd, Basename(dirPath))}));\n+}\n+\n+// If \"dir/child\" and \"dir/child2\" are links to the same file, and \"dir/child\"\n+// is unlinked, a watch on \"dir\" with IN_EXCL_UNLINK will exclude future events\n+// for fds on \"dir/child\" but not \"dir/child2\".\n+//\n+// We need to disable S/R because there are filesystems where we cannot re-open\n+// fds to an unlinked file across S/R, e.g. gofer-backed filesytems.\n+TEST(Inotify, ExcludeUnlinkMultipleChildren_NoRandomSave) {\n+ const DisableSave ds;\n+ // TODO(gvisor.dev/issue/1624): This test fails on VFS1.\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const TempPath file =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n+ std::string path1 = file.path();\n+ std::string path2 = NewTempAbsPathInDir(dir.path());\n+\n+ const int rc = link(path1.c_str(), path2.c_str());\n+ // NOTE(b/34861058): link(2) is only supported on tmpfs in the sandbox.\n+ SKIP_IF(IsRunningOnGvisor() && rc != 0 &&\n+ (errno == EPERM || errno == ENOENT));\n+ ASSERT_THAT(rc, SyscallSucceeds());\n+ const FileDescriptor fd1 =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(path1.c_str(), O_RDWR));\n+ const FileDescriptor fd2 =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(path2.c_str(), O_RDWR));\n+\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+ const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+\n+ // After unlinking path1, only events on the fd for path2 should be generated.\n+ ASSERT_THAT(unlink(path1.c_str()), SyscallSucceeds());\n+ ASSERT_THAT(write(fd1.get(), \"x\", 1), SyscallSucceeds());\n+ ASSERT_THAT(write(fd2.get(), \"x\", 1), SyscallSucceeds());\n+\n+ const std::vector<Event> events =\n+ ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({\n+ Event(IN_DELETE, wd, Basename(path1)),\n+ Event(IN_MODIFY, wd, Basename(path2)),\n+ }));\n+}\n+\n+// On native Linux, actions of data type FSNOTIFY_EVENT_INODE are not affected\n+// by IN_EXCL_UNLINK (see\n+// fs/notify/inotify/inotify_fsnotify.c:inotify_handle_event). Inode-level\n+// events include changes to metadata and extended attributes.\n+//\n+// We need to disable S/R because there are filesystems where we cannot re-open\n+// fds to an unlinked file across S/R, e.g. 
gofer-backed filesytems.\n+TEST(Inotify, ExcludeUnlinkInodeEvents_NoRandomSave) {\n+ const DisableSave ds;\n+\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const TempPath file =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path().c_str(), O_RDWR));\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+ const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+\n+ // NOTE(b/157163751): Create another link before unlinking. This is needed for\n+ // the gofer filesystem in gVisor, where open fds will not work once the link\n+ // count hits zero. In VFS2, we end up skipping the gofer test anyway, because\n+ // hard links are not supported for gofer fs.\n+ if (IsRunningOnGvisor()) {\n+ std::string link_path = NewTempAbsPath();\n+ const int rc = link(file.path().c_str(), link_path.c_str());\n+ // NOTE(b/34861058): link(2) is only supported on tmpfs in the sandbox.\n+ SKIP_IF(rc != 0 && (errno == EPERM || errno == ENOENT));\n+ ASSERT_THAT(rc, SyscallSucceeds());\n+ }\n+\n+ // Even after unlinking, inode-level operations will trigger events regardless\n+ // of IN_EXCL_UNLINK.\n+ ASSERT_THAT(unlink(file.path().c_str()), SyscallSucceeds());\n+\n+ // Perform various actions on fd.\n+ ASSERT_THAT(ftruncate(fd.get(), 12345), SyscallSucceeds());\n+ std::vector<Event> events =\n+ ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({\n+ Event(IN_DELETE, wd, Basename(file.path())),\n+ Event(IN_MODIFY, wd, Basename(file.path())),\n+ }));\n+\n+ struct timeval times[2] = {{1, 0}, {2, 0}};\n+ ASSERT_THAT(futimes(fd.get(), times), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd, Basename(file.path()))}));\n+\n+ // S/R is disabled on this entire test due to behavior with unlink; it must\n+ // also be disabled after this point because of fchmod.\n+ ASSERT_THAT(fchmod(fd.get(), 0777), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd, Basename(file.path()))}));\n+}\n+\n} // namespace\n} // namespace testing\n} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Implement IN_EXCL_UNLINK inotify option in vfs2. Limited to tmpfs. Inotify support in other filesystem implementations to follow. Updates #1479 PiperOrigin-RevId: 313828648
260,023
29.05.2020 12:27:55
25,200
089c88f2e87fb14cead02caea7f9dba0a5957395
Move TCP to CLOSED from SYN-RCVD on RST. RST handling is broken when the TCP state transitions from SYN-SENT to SYN-RCVD in case of simultaneous open. An incoming RST should trigger cleanup of the endpoint. RFC793, section 3.9, page 70. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -1347,6 +1347,7 @@ func (e *endpoint) protocolMainLoop(handshake bool, wakerInitDone chan<- struct{\ne.setEndpointState(StateError)\ne.HardError = err\n+ e.workerCleanup = true\n// Lock released below.\nepilogue()\nreturn err\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/dut.go", "new_path": "test/packetimpact/testbench/dut.go", "diff": "@@ -241,7 +241,9 @@ func (dut *DUT) Connect(fd int32, sa unix.Sockaddr) {\nctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)\ndefer cancel()\nret, err := dut.ConnectWithErrno(ctx, fd, sa)\n- if ret != 0 {\n+ // Ignore 'operation in progress' error that can be returned when the socket\n+ // is non-blocking.\n+ if err != syscall.Errno(unix.EINPROGRESS) && ret != 0 {\ndut.t.Fatalf(\"failed to connect socket: %s\", err)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/BUILD", "new_path": "test/packetimpact/tests/BUILD", "diff": "@@ -169,6 +169,26 @@ packetimpact_go_test(\n],\n)\n+packetimpact_go_test(\n+ name = \"tcp_synsent_reset\",\n+ srcs = [\"tcp_synsent_reset_test.go\"],\n+ deps = [\n+ \"//pkg/tcpip/header\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\n+packetimpact_go_test(\n+ name = \"tcp_synrcvd_reset\",\n+ srcs = [\"tcp_synrcvd_reset_test.go\"],\n+ deps = [\n+ \"//pkg/tcpip/header\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\npacketimpact_go_test(\nname = \"icmpv6_param_problem\",\nsrcs = [\"icmpv6_param_problem_test.go\"],\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/packetimpact/tests/tcp_synrcvd_reset_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp_syn_reset_test\n+\n+import (\n+ \"flag\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ tb \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ tb.RegisterFlags(flag.CommandLine)\n+}\n+\n+// TestTCPSynRcvdReset tests transition from SYN-RCVD to CLOSED.\n+func TestTCPSynRcvdReset(t *testing.T) {\n+ dut := tb.NewDUT(t)\n+ defer dut.TearDown()\n+ listenFD, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n+ defer dut.Close(listenFD)\n+ conn := tb.NewTCPIPv4(t, tb.TCP{DstPort: &remotePort}, tb.TCP{SrcPort: &remotePort})\n+ defer conn.Close()\n+\n+ // Expect dut connection to have transitioned to SYN-RCVD state.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)})\n+ if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {\n+ t.Fatalf(\"expected SYN-ACK %s\", err)\n+ }\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)})\n+ // Expect the connection to have transitioned SYN-RCVD to 
CLOSED.\n+ // TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})\n+ if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {\n+ t.Fatalf(\"expected a TCP RST %s\", err)\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/packetimpact/tests/tcp_synsent_reset_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp_synsent_reset_test\n+\n+import (\n+ \"flag\"\n+ \"net\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ tb \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ tb.RegisterFlags(flag.CommandLine)\n+}\n+\n+// dutSynSentState sets up the dut connection in SYN-SENT state.\n+func dutSynSentState(t *testing.T) (*tb.DUT, *tb.TCPIPv4, uint16, uint16) {\n+ dut := tb.NewDUT(t)\n+\n+ clientFD, clientPort := dut.CreateBoundSocket(unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, net.ParseIP(tb.RemoteIPv4))\n+ port := uint16(9001)\n+ conn := tb.NewTCPIPv4(t, tb.TCP{SrcPort: &port, DstPort: &clientPort}, tb.TCP{SrcPort: &clientPort, DstPort: &port})\n+\n+ sa := unix.SockaddrInet4{Port: int(port)}\n+ copy(sa.Addr[:], net.IP(net.ParseIP(tb.LocalIPv4)).To4())\n+ // Bring the dut to SYN-SENT state with a non-blocking connect.\n+ dut.Connect(clientFD, &sa)\n+ if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)}, nil, time.Second); err != nil {\n+ t.Fatalf(\"expected SYN\\n\")\n+ }\n+\n+ return &dut, &conn, port, clientPort\n+}\n+\n+// TestTCPSynSentReset tests RFC793, p67: SYN-SENT to CLOSED transition.\n+func TestTCPSynSentReset(t *testing.T) {\n+ dut, conn, _, _ := dutSynSentState(t)\n+ defer conn.Close()\n+ defer dut.TearDown()\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagRst | header.TCPFlagAck)})\n+ // Expect the connection to have closed.\n+ // TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})\n+ if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {\n+ t.Fatalf(\"expected a TCP RST\")\n+ }\n+}\n+\n+// TestTCPSynSentRcvdReset tests RFC793, p70, SYN-SENT to SYN-RCVD to CLOSED\n+// transitions.\n+func TestTCPSynSentRcvdReset(t *testing.T) {\n+ dut, c, remotePort, clientPort := dutSynSentState(t)\n+ defer dut.TearDown()\n+ defer c.Close()\n+\n+ conn := tb.NewTCPIPv4(t, tb.TCP{SrcPort: &remotePort, DstPort: &clientPort}, tb.TCP{SrcPort: &clientPort, DstPort: &remotePort})\n+ defer conn.Close()\n+ // Initiate new SYN connection with the same port pair\n+ // (simultaneous open case), expect the dut connection to move to\n+ // SYN-RCVD state\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)})\n+ if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {\n+ t.Fatalf(\"expected 
SYN-ACK %s\\n\", err)\n+ }\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)})\n+ // Expect the connection to have transitioned SYN-RCVD to CLOSED.\n+ // TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)})\n+ if _, err := conn.ExpectData(&tb.TCP{Flags: tb.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {\n+ t.Fatalf(\"expected a TCP RST\")\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Move TCP to CLOSED from SYN-RCVD on RST. RST handling is broken when the TCP state transitions from SYN-SENT to SYN-RCVD in case of simultaneous open. An incoming RST should trigger cleanup of the endpoint. RFC793, section 3.9, page 70. Fixes #2814 PiperOrigin-RevId: 313828777
260,003
29.05.2020 12:58:50
25,200
341be65421edff16fd9eeb593301ce1d66148772
Update WritePacket* API to take ownership of packets to be written. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -1304,13 +1304,16 @@ func (n *NIC) forwardPacket(r *Route, protocol tcpip.NetworkProtocolNumber, pkt\npkt.Header = buffer.NewPrependable(linkHeaderLen)\n}\n+ // WritePacket takes ownership of pkt, calculate numBytes first.\n+ numBytes := pkt.Header.UsedLength() + pkt.Data.Size()\n+\nif err := n.linkEP.WritePacket(r, nil /* gso */, protocol, pkt); err != nil {\nr.Stats().IP.OutgoingPacketErrors.Increment()\nreturn\n}\nn.stats.Tx.Packets.Increment()\n- n.stats.Tx.Bytes.IncrementBy(uint64(pkt.Header.UsedLength() + pkt.Data.Size()))\n+ n.stats.Tx.Bytes.IncrementBy(uint64(numBytes))\n}\n// DeliverTransportPacket delivers the packets to the appropriate transport\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/registration.go", "new_path": "pkg/tcpip/stack/registration.go", "diff": "@@ -240,16 +240,17 @@ type NetworkEndpoint interface {\nMaxHeaderLength() uint16\n// WritePacket writes a packet to the given destination address and\n- // protocol. It sets pkt.NetworkHeader. pkt.TransportHeader must have\n- // already been set.\n+ // protocol. It takes ownership of pkt. pkt.TransportHeader must have already\n+ // been set.\nWritePacket(r *Route, gso *GSO, params NetworkHeaderParams, pkt PacketBuffer) *tcpip.Error\n// WritePackets writes packets to the given destination address and\n- // protocol. pkts must not be zero length.\n+ // protocol. pkts must not be zero length. It takes ownership of pkts and\n+ // underlying packets.\nWritePackets(r *Route, gso *GSO, pkts PacketBufferList, params NetworkHeaderParams) (int, *tcpip.Error)\n// WriteHeaderIncludedPacket writes a packet that includes a network\n- // header to the given destination address.\n+ // header to the given destination address. It takes ownership of pkt.\nWriteHeaderIncludedPacket(r *Route, pkt PacketBuffer) *tcpip.Error\n// ID returns the network protocol endpoint ID.\n@@ -382,9 +383,8 @@ type LinkEndpoint interface {\nLinkAddress() tcpip.LinkAddress\n// WritePacket writes a packet with the given protocol through the\n- // given route. It sets pkt.LinkHeader if a link layer header exists.\n- // pkt.NetworkHeader and pkt.TransportHeader must have already been\n- // set.\n+ // given route. It takes ownership of pkt. pkt.NetworkHeader and\n+ // pkt.TransportHeader must have already been set.\n//\n// To participate in transparent bridging, a LinkEndpoint implementation\n// should call eth.Encode with header.EthernetFields.SrcAddr set to\n@@ -392,7 +392,8 @@ type LinkEndpoint interface {\nWritePacket(r *Route, gso *GSO, protocol tcpip.NetworkProtocolNumber, pkt PacketBuffer) *tcpip.Error\n// WritePackets writes packets with the given protocol through the\n- // given route. pkts must not be zero length.\n+ // given route. pkts must not be zero length. It takes ownership of pkts and\n+ // underlying packets.\n//\n// Right now, WritePackets is used only when the software segmentation\n// offload is enabled. If it will be used for something else, it may\n@@ -400,7 +401,7 @@ type LinkEndpoint interface {\nWritePackets(r *Route, gso *GSO, pkts PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, *tcpip.Error)\n// WriteRawPacket writes a packet directly to the link. The packet\n- // should already have an ethernet header.\n+ // should already have an ethernet header. 
It takes ownership of vv.\nWriteRawPacket(vv buffer.VectorisedView) *tcpip.Error\n// Attach attaches the data link layer endpoint to the network-layer\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/route.go", "new_path": "pkg/tcpip/stack/route.go", "diff": "@@ -158,12 +158,15 @@ func (r *Route) WritePacket(gso *GSO, params NetworkHeaderParams, pkt PacketBuff\nreturn tcpip.ErrInvalidEndpointState\n}\n+ // WritePacket takes ownership of pkt, calculate numBytes first.\n+ numBytes := pkt.Header.UsedLength() + pkt.Data.Size()\n+\nerr := r.ref.ep.WritePacket(r, gso, params, pkt)\nif err != nil {\nr.Stats().IP.OutgoingPacketErrors.Increment()\n} else {\nr.ref.nic.stats.Tx.Packets.Increment()\n- r.ref.nic.stats.Tx.Bytes.IncrementBy(uint64(pkt.Header.UsedLength() + pkt.Data.Size()))\n+ r.ref.nic.stats.Tx.Bytes.IncrementBy(uint64(numBytes))\n}\nreturn err\n}\n@@ -175,9 +178,12 @@ func (r *Route) WritePackets(gso *GSO, pkts PacketBufferList, params NetworkHead\nreturn 0, tcpip.ErrInvalidEndpointState\n}\n+ // WritePackets takes ownership of pkt, calculate length first.\n+ numPkts := pkts.Len()\n+\nn, err := r.ref.ep.WritePackets(r, gso, pkts, params)\nif err != nil {\n- r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(pkts.Len() - n))\n+ r.Stats().IP.OutgoingPacketErrors.IncrementBy(uint64(numPkts - n))\n}\nr.ref.nic.stats.Tx.Packets.IncrementBy(uint64(n))\n@@ -198,12 +204,15 @@ func (r *Route) WriteHeaderIncludedPacket(pkt PacketBuffer) *tcpip.Error {\nreturn tcpip.ErrInvalidEndpointState\n}\n+ // WriteHeaderIncludedPacket takes ownership of pkt, calculate numBytes first.\n+ numBytes := pkt.Data.Size()\n+\nif err := r.ref.ep.WriteHeaderIncludedPacket(r, pkt); err != nil {\nr.Stats().IP.OutgoingPacketErrors.Increment()\nreturn err\n}\nr.ref.nic.stats.Tx.Packets.Increment()\n- r.ref.nic.stats.Tx.Bytes.IncrementBy(uint64(pkt.Data.Size()))\n+ r.ref.nic.stats.Tx.Bytes.IncrementBy(uint64(numBytes))\nreturn nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Update WritePacket* API to take ownership of packets to be written. Updates #2404. PiperOrigin-RevId: 313834784
259,858
29.05.2020 13:19:53
25,200
c017ca8138e0f271b99f68a494cb37bfaeba5a54
Fix issue with make copy targets.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -173,7 +173,8 @@ $(RELEASE_KEY):\nrelease: $(RELEASE_KEY) ## Builds a release.\n@mkdir -p $(RELEASE_ROOT)\n@T=$$(mktemp -d /tmp/release.XXXXXX); \\\n- $(MAKE) copy TARGETS=\"runsc runsc:runsc-debian\" DESTINATION=$$T && \\\n+ $(MAKE) copy TARGETS=\"runsc\" DESTINATION=$$T && \\\n+ $(MAKE) copy TARGETS=\"runsc:runsc-debian\" DESTINATION=$$T && \\\nNIGHTLY=$(RELEASE_NIGHTLY) tools/make_release.sh $(RELEASE_KEY) $(RELEASE_ROOT) $$T/*; \\\nrc=$$?; rm -rf $$T; exit $$rc\n.PHONY: release\n" } ]
Go
Apache License 2.0
google/gvisor
Fix issue with make copy targets. PiperOrigin-RevId: 313838809
259,891
29.05.2020 15:37:34
25,200
790811f75783cd3cb82b6aba5e8152129b2d1d4d
Fix copied comment mistakes.
[ { "change_type": "MODIFY", "old_path": "test/iptables/filter_input.go", "new_path": "test/iptables/filter_input.go", "diff": "@@ -670,7 +670,7 @@ func (FilterInputInvertDestination) LocalAction(ip net.IP) error {\nreturn sendUDPLoop(ip, acceptPort, sendloopDuration)\n}\n-// FilterInputSource verifies that we can filter packets via `-d\n+// FilterInputSource verifies that we can filter packets via `-s\n// <ipaddr>`.\ntype FilterInputSource struct{}\n@@ -699,7 +699,7 @@ func (FilterInputSource) LocalAction(ip net.IP) error {\nreturn sendUDPLoop(ip, acceptPort, sendloopDuration)\n}\n-// FilterInputInvertSource verifies that we can filter packets via `! -d\n+// FilterInputInvertSource verifies that we can filter packets via `! -s\n// <ipaddr>`.\ntype FilterInputInvertSource struct{}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix copied comment mistakes. PiperOrigin-RevId: 313862843
259,881
29.05.2020 15:40:06
25,200
65569cfca08a99e3700108cec64f3aa443c357b0
Update Go version build tags None of the dependencies have changed in 1.15. It may be possible to simplify some of the wrappers in rawfile following 1.13, but that can come in a later change.
[ { "change_type": "MODIFY", "old_path": "pkg/procid/procid_amd64.s", "new_path": "pkg/procid/procid_amd64.s", "diff": "// +build amd64\n// +build go1.8\n-// +build !go1.15\n+// +build !go1.16\n#include \"textflag.h\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/procid/procid_arm64.s", "new_path": "pkg/procid/procid_arm64.s", "diff": "// +build arm64\n// +build go1.8\n-// +build !go1.15\n+// +build !go1.16\n#include \"textflag.h\"\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go", "new_path": "pkg/sentry/platform/ptrace/subprocess_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/mount_unsafe.go", "new_path": "pkg/sentry/vfs/mount_unsafe.go", "diff": "// limitations under the License.\n// +build go1.12\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sleep/sleep_unsafe.go", "new_path": "pkg/sleep/sleep_unsafe.go", "diff": "// limitations under the License.\n// +build go1.11\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/memmove_unsafe.go", "new_path": "pkg/sync/memmove_unsafe.go", "diff": "// license that can be found in the LICENSE file.\n// +build go1.12\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/mutex_unsafe.go", "new_path": "pkg/sync/mutex_unsafe.go", "diff": "// license that can be found in the LICENSE file.\n// +build go1.13\n-// +build !go1.15\n+// +build !go1.16\n// When updating the build constraint (above), check that syncMutex matches the\n// standard library sync.Mutex definition.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sync/rwmutex_unsafe.go", "new_path": "pkg/sync/rwmutex_unsafe.go", "diff": "// license that can be found in the LICENSE file.\n// +build go1.13\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/syncevent/waiter_unsafe.go", "new_path": "pkg/syncevent/waiter_unsafe.go", "diff": "// limitations under the License.\n// +build go1.11\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/rawfile/blockingpoll_yield_unsafe.go", "new_path": "pkg/tcpip/link/rawfile/blockingpoll_yield_unsafe.go", "diff": "// +build linux,amd64 linux,arm64\n// +build go1.12\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when 
updating Go version.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/time_unsafe.go", "new_path": "pkg/tcpip/time_unsafe.go", "diff": "// limitations under the License.\n// +build go1.9\n-// +build !go1.15\n+// +build !go1.16\n// Check go:linkname function signatures when updating Go version.\n" } ]
Go
Apache License 2.0
google/gvisor
Update Go version build tags. None of the dependencies have changed in 1.15. It may be possible to simplify some of the wrappers in rawfile following 1.13, but that can come in a later change. PiperOrigin-RevId: 313863264
259,898
29.05.2020 17:22:56
25,200
a9b47390c821942d60784e308f681f213645049c
Test TCP should queue RECEIVE request in SYN-SENT
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/dut/posix_server.cc", "new_path": "test/packetimpact/dut/posix_server.cc", "diff": "@@ -158,6 +158,14 @@ class PosixImpl final : public posix_server::Posix::Service {\nreturn ::grpc::Status::OK;\n}\n+ ::grpc::Status Fcntl(grpc_impl::ServerContext *context,\n+ const ::posix_server::FcntlRequest *request,\n+ ::posix_server::FcntlResponse *response) override {\n+ response->set_ret(::fcntl(request->fd(), request->cmd(), request->arg()));\n+ response->set_errno_(errno);\n+ return ::grpc::Status::OK;\n+ }\n+\n::grpc::Status GetSockName(\ngrpc_impl::ServerContext *context,\nconst ::posix_server::GetSockNameRequest *request,\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/proto/posix_server.proto", "new_path": "test/packetimpact/proto/posix_server.proto", "diff": "@@ -91,6 +91,17 @@ message ConnectResponse {\nint32 errno_ = 2; // \"errno\" may fail to compile in c++.\n}\n+message FcntlRequest {\n+ int32 fd = 1;\n+ int32 cmd = 2;\n+ int32 arg = 3;\n+}\n+\n+message FcntlResponse {\n+ int32 ret = 1;\n+ int32 errno_ = 2;\n+}\n+\nmessage GetSockNameRequest {\nint32 sockfd = 1;\n}\n@@ -198,6 +209,8 @@ service Posix {\nrpc Close(CloseRequest) returns (CloseResponse);\n// Call connect() on the DUT.\nrpc Connect(ConnectRequest) returns (ConnectResponse);\n+ // Call fcntl() on the DUT.\n+ rpc Fcntl(FcntlRequest) returns (FcntlResponse);\n// Call getsockname() on the DUT.\nrpc GetSockName(GetSockNameRequest) returns (GetSockNameResponse);\n// Call getsockopt() on the DUT.\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/connections.go", "new_path": "test/packetimpact/testbench/connections.go", "diff": "@@ -266,14 +266,14 @@ func SeqNumValue(v seqnum.Value) *seqnum.Value {\n}\n// newTCPState creates a new TCPState.\n-func newTCPState(domain int, out, in TCP) (*tcpState, error) {\n+func newTCPState(domain int, out, in TCP) (*tcpState, unix.Sockaddr, error) {\nportPickerFD, localAddr, err := pickPort(domain, unix.SOCK_STREAM)\nif err != nil {\n- return nil, err\n+ return nil, nil, err\n}\nlocalPort, err := portFromSockaddr(localAddr)\nif err != nil {\n- return nil, err\n+ return nil, nil, err\n}\ns := tcpState{\nout: TCP{SrcPort: &localPort},\n@@ -283,12 +283,12 @@ func newTCPState(domain int, out, in TCP) (*tcpState, error) {\nfinSent: false,\n}\nif err := s.out.merge(&out); err != nil {\n- return nil, err\n+ return nil, nil, err\n}\nif err := s.in.merge(&in); err != nil {\n- return nil, err\n+ return nil, nil, err\n}\n- return &s, nil\n+ return &s, localAddr, nil\n}\nfunc (s *tcpState) outgoing() Layer {\n@@ -606,7 +606,7 @@ func NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {\nif err != nil {\nt.Fatalf(\"can't make ipv4State: %s\", err)\n}\n- tcpState, err := newTCPState(unix.AF_INET, outgoingTCP, incomingTCP)\n+ tcpState, localAddr, err := newTCPState(unix.AF_INET, outgoingTCP, incomingTCP)\nif err != nil {\nt.Fatalf(\"can't make tcpState: %s\", err)\n}\n@@ -623,6 +623,7 @@ func NewTCPIPv4(t *testing.T, outgoingTCP, incomingTCP TCP) TCPIPv4 {\nlayerStates: []layerState{etherState, ipv4State, tcpState},\ninjector: injector,\nsniffer: sniffer,\n+ localAddr: localAddr,\nt: t,\n}\n}\n@@ -703,6 +704,11 @@ func (conn *TCPIPv4) SynAck() *TCP {\nreturn conn.state().synAck\n}\n+// LocalAddr gets the local socket address of this connection.\n+func (conn *TCPIPv4) LocalAddr() unix.Sockaddr {\n+ return conn.localAddr\n+}\n+\n// IPv6Conn maintains the state for all the layers in a IPv6 connection.\ntype 
IPv6Conn Connection\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/dut.go", "new_path": "test/packetimpact/testbench/dut.go", "diff": "@@ -262,6 +262,35 @@ func (dut *DUT) ConnectWithErrno(ctx context.Context, fd int32, sa unix.Sockaddr\nreturn resp.GetRet(), syscall.Errno(resp.GetErrno_())\n}\n+// Fcntl calls fcntl on the DUT and causes a fatal test failure if it\n+// doesn't succeed. If more control over the timeout or error handling is\n+// needed, use FcntlWithErrno.\n+func (dut *DUT) Fcntl(fd, cmd, arg int32) int32 {\n+ dut.t.Helper()\n+ ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout)\n+ defer cancel()\n+ ret, err := dut.FcntlWithErrno(ctx, fd, cmd, arg)\n+ if ret == -1 {\n+ dut.t.Fatalf(\"failed to Fcntl: ret=%d, errno=%s\", ret, err)\n+ }\n+ return ret\n+}\n+\n+// FcntlWithErrno calls fcntl on the DUT.\n+func (dut *DUT) FcntlWithErrno(ctx context.Context, fd, cmd, arg int32) (int32, error) {\n+ dut.t.Helper()\n+ req := pb.FcntlRequest{\n+ Fd: fd,\n+ Cmd: cmd,\n+ Arg: arg,\n+ }\n+ resp, err := dut.posixServer.Fcntl(ctx, &req)\n+ if err != nil {\n+ dut.t.Fatalf(\"failed to call Fcntl: %s\", err)\n+ }\n+ return resp.GetRet(), syscall.Errno(resp.GetErrno_())\n+}\n+\n// GetSockName calls getsockname on the DUT and causes a fatal test failure if\n// it doesn't succeed. If more control over the timeout or error handling is\n// needed, use GetSockNameWithErrno.\n@@ -478,6 +507,19 @@ func (dut *DUT) SendToWithErrno(ctx context.Context, sockfd int32, buf []byte, f\nreturn resp.GetRet(), syscall.Errno(resp.GetErrno_())\n}\n+// SetNonBlocking will set O_NONBLOCK flag for fd if nonblocking\n+// is true, otherwise it will clear the flag.\n+func (dut *DUT) SetNonBlocking(fd int32, nonblocking bool) {\n+ dut.t.Helper()\n+ flags := dut.Fcntl(fd, unix.F_GETFL, 0)\n+ if nonblocking {\n+ flags |= unix.O_NONBLOCK\n+ } else {\n+ flags &= ^unix.O_NONBLOCK\n+ }\n+ dut.Fcntl(fd, unix.F_SETFL, flags)\n+}\n+\nfunc (dut *DUT) setSockOpt(ctx context.Context, sockfd, level, optname int32, optval *pb.SockOptVal) (int32, error) {\ndut.t.Helper()\nreq := pb.SetSockOptRequest{\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/BUILD", "new_path": "test/packetimpact/tests/BUILD", "diff": "@@ -169,6 +169,18 @@ packetimpact_go_test(\n],\n)\n+packetimpact_go_test(\n+ name = \"tcp_queue_receive_in_syn_sent\",\n+ srcs = [\"tcp_queue_receive_in_syn_sent_test.go\"],\n+ # TODO(b/157658105): Fix netstack then remove the line below.\n+ expect_netstack_failure = True,\n+ deps = [\n+ \"//pkg/tcpip/header\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\npacketimpact_go_test(\nname = \"tcp_synsent_reset\",\nsrcs = [\"tcp_synsent_reset_test.go\"],\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/packetimpact/tests/tcp_queue_receive_in_syn_sent_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package 
tcp_queue_receive_in_syn_sent_test\n+\n+import (\n+ \"bytes\"\n+ \"context\"\n+ \"encoding/hex\"\n+ \"errors\"\n+ \"flag\"\n+ \"net\"\n+ \"sync\"\n+ \"syscall\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ tb \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ tb.RegisterFlags(flag.CommandLine)\n+}\n+\n+func TestQueueReceiveInSynSent(t *testing.T) {\n+ dut := tb.NewDUT(t)\n+ defer dut.TearDown()\n+\n+ socket, remotePort := dut.CreateBoundSocket(unix.SOCK_STREAM, unix.IPPROTO_TCP, net.ParseIP(tb.RemoteIPv4))\n+ conn := tb.NewTCPIPv4(t, tb.TCP{DstPort: &remotePort}, tb.TCP{SrcPort: &remotePort})\n+ defer conn.Close()\n+\n+ sampleData := []byte(\"Sample Data\")\n+\n+ dut.SetNonBlocking(socket, true)\n+ if _, err := dut.ConnectWithErrno(context.Background(), socket, conn.LocalAddr()); !errors.Is(err, syscall.EINPROGRESS) {\n+ t.Fatalf(\"failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS\", err)\n+ }\n+ if _, err := conn.Expect(tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn)}, time.Second); err != nil {\n+ t.Fatalf(\"expected a SYN from DUT, but got none: %s\", err)\n+ }\n+\n+ // Issue RECEIVE call in SYN-SENT, this should be queued for process until the connection\n+ // is established.\n+ dut.SetNonBlocking(socket, false)\n+ var wg sync.WaitGroup\n+ defer wg.Wait()\n+ wg.Add(1)\n+ go func() {\n+ defer wg.Done()\n+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)\n+ defer cancel()\n+ n, buff, err := dut.RecvWithErrno(ctx, socket, int32(len(sampleData)), 0)\n+ if n == -1 {\n+ t.Fatalf(\"failed to recv on DUT: %s\", err)\n+ }\n+ if got := buff[:n]; !bytes.Equal(got, sampleData) {\n+ t.Fatalf(\"received data don't match, got:\\n%s, want:\\n%s\", hex.Dump(got), hex.Dump(sampleData))\n+ }\n+ }()\n+\n+ // The following sleep is used to prevent the connection from being established while the\n+ // RPC is in flight.\n+ time.Sleep(time.Second)\n+\n+ // Bring the connection to Established.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagSyn | header.TCPFlagAck)})\n+ if _, err := conn.Expect(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)}, time.Second); err != nil {\n+ t.Fatalf(\"expected an ACK from DUT, but got none: %s\", err)\n+ }\n+\n+ // Send sample data to DUT.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck)}, &tb.Payload{Bytes: sampleData})\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Test TCP should queue RECEIVE request in SYN-SENT PiperOrigin-RevId: 313878910
259,962
01.06.2020 09:56:52
25,200
839208f118793c665f48a057d1f35d74979b7916
Enable TCP Receive buffer moderation in gonet and benchmark. Fixes
[ { "change_type": "MODIFY", "old_path": "benchmarks/tcp/tcp_benchmark.sh", "new_path": "benchmarks/tcp/tcp_benchmark.sh", "diff": "@@ -94,6 +94,9 @@ while [ $# -gt 0 ]; do\n--cubic)\nnetstack_opts=\"${netstack_opts} -cubic\"\n;;\n+ --moderate-recv-buf)\n+ netstack_opts=\"${netstack_opts} -moderate_recv_buf\"\n+ ;;\n--duration)\nshift\n[ \"$#\" -le 0 ] && echo \"no duration provided\" && exit 1\n@@ -149,6 +152,7 @@ while [ $# -gt 0 ]; do\necho \" --server use netstack as the server\"\necho \" --mtu set the mtu (bytes)\"\necho \" --sack enable SACK support\"\n+ echo \" --moderate-recv-buf enable TCP receive buffer auto-tuning\"\necho \" --cubic enable CUBIC congestion control for Netstack\"\necho \" --duration set the test duration (s)\"\necho \" --latency set the latency (ms)\"\n" }, { "change_type": "MODIFY", "old_path": "benchmarks/tcp/tcp_proxy.go", "new_path": "benchmarks/tcp/tcp_proxy.go", "diff": "@@ -56,6 +56,7 @@ var (\nmask = flag.Int(\"mask\", 8, \"mask size for address\")\niface = flag.String(\"iface\", \"\", \"network interface name to bind for netstack\")\nsack = flag.Bool(\"sack\", false, \"enable SACK support for netstack\")\n+ moderateRecvBuf = flag.Bool(\"moderate_recv_buf\", false, \"enable TCP Receive Buffer Auto-tuning\")\ncubic = flag.Bool(\"cubic\", false, \"enable use of CUBIC congestion control for netstack\")\ngso = flag.Int(\"gso\", 0, \"GSO maximum size\")\nswgso = flag.Bool(\"swgso\", false, \"software-level GSO\")\n@@ -231,6 +232,11 @@ func newNetstackImpl(mode string) (impl, error) {\nreturn nil, fmt.Errorf(\"SetTransportProtocolOption for SACKEnabled failed: %v\", err)\n}\n+ // Enable Receive Buffer Auto-Tuning.\n+ if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.ModerateReceiveBufferOption(*moderateRecvBuf)); err != nil {\n+ return nil, fmt.Errorf(\"SetTransportProtocolOption failed: %v\", err)\n+ }\n+\n// Set Congestion Control to cubic if requested.\nif *cubic {\nif err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.CongestionControlOption(\"cubic\")); err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/adapters/gonet/gonet.go", "new_path": "pkg/tcpip/adapters/gonet/gonet.go", "diff": "@@ -335,6 +335,11 @@ func (c *TCPConn) Read(b []byte) (int, error) {\ndeadline := c.readCancel()\nnumRead := 0\n+ defer func() {\n+ if numRead != 0 {\n+ c.ep.ModerateRecvBuf(numRead)\n+ }\n+ }()\nfor numRead != len(b) {\nif len(c.read) == 0 {\nvar err error\n" } ]
Go
Apache License 2.0
google/gvisor
Enable TCP Receive buffer moderation in gonet and benchmark. Fixes #1666 PiperOrigin-RevId: 314148384
259,858
01.06.2020 10:27:59
25,200
2c6c4365ea3ad23166353aa03643fc009669ee93
Move to make for tag release workflow. This will make tag & release workflows idempotent.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -158,10 +158,16 @@ website-deploy: website-push ## Deploy a new version of the website.\n## RELEASE_ROOT - The repository root (default: \"repo\" directory).\n## RELEASE_KEY - The repository GPG private key file (default: dummy key is created).\n## RELEASE_NIGHTLY - Set to true if a nightly release (default: false).\n+## RELEASE_COMMIT - The commit or Change-Id for the release (needed for tag).\n+## RELEASE_NAME - The name of the release in the proper format (needed for tag).\n+## RELEASE_NOTES - The file containing release notes (needed for tag).\n##\nRELEASE_ROOT := $(CURDIR)/repo\nRELEASE_KEY := repo.key\nRELEASE_NIGHTLY := false\n+RELEASE_COMMIT :=\n+RELEASE_NAME :=\n+RELEASE_NOTES :=\n$(RELEASE_KEY):\n@echo \"WARNING: Generating a key for testing ($@); don't use this.\"\n@@ -179,6 +185,10 @@ release: $(RELEASE_KEY) ## Builds a release.\nrc=$$?; rm -rf $$T; exit $$rc\n.PHONY: release\n+tag: ## Creates and pushes a release tag.\n+ @tools/tag_release.sh \"$(RELEASE_COMMIT)\" \"$(RELEASE_NAME)\" \"$(RELEASE_NOTES)\"\n+.PHONY: tag\n+\n##\n## Development helpers and tooling.\n##\n" }, { "change_type": "DELETE", "old_path": "scripts/release.sh", "new_path": null, "diff": "-#!/bin/bash\n-\n-# Copyright 2018 The gVisor Authors.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-cd $(dirname $0)/..\n-source scripts/common.sh\n-\n-# Tag a release only if provided.\n-if ! [[ -v KOKORO_RELEASE_COMMIT ]]; then\n- echo \"No KOKORO_RELEASE_COMMIT provided.\" >&2\n- exit 1\n-fi\n-if ! [[ -v KOKORO_RELEASE_TAG ]]; then\n- echo \"No KOKORO_RELEASE_TAG provided.\" >&2\n- exit 1\n-fi\n-if ! [[ -v KOKORO_RELNOTES ]]; then\n- echo \"No KOKORO_RELNOTES provided.\" >&2\n- exit 1\n-fi\n-if ! 
[[ -r \"${KOKORO_ARTIFACTS_DIR}/${KOKORO_RELNOTES}\" ]]; then\n- echo \"The file '${KOKORO_ARTIFACTS_DIR}/${KOKORO_RELNOTES}' is not readable.\" >&2\n- exit 1\n-fi\n-\n-# Unless an explicit releaser is provided, use the bot e-mail.\n-declare -r KOKORO_RELEASE_AUTHOR=${KOKORO_RELEASE_AUTHOR:-gvisor-bot}\n-declare -r EMAIL=${EMAIL:-${KOKORO_RELEASE_AUTHOR}@google.com}\n-\n-# Ensure we have an appropriate configuration for the tag.\n-git config --get user.name || git config user.name \"gVisor-bot\"\n-git config --get user.email || git config user.email \"${EMAIL}\"\n-\n-# Provide a credential if available.\n-if [[ -v KOKORO_GITHUB_ACCESS_TOKEN ]]; then\n- git config --global credential.helper cache\n- git credential approve <<EOF\n-protocol=https\n-host=github.com\n-username=$(cat \"${KOKORO_KEYSTORE_DIR}/${KOKORO_GITHUB_ACCESS_TOKEN}\")\n-password=x-oauth-basic\n-EOF\n-fi\n-\n-# Run the release tool, which pushes to the origin repository.\n-tools/tag_release.sh \\\n- \"${KOKORO_RELEASE_COMMIT}\" \\\n- \"${KOKORO_RELEASE_TAG}\" \\\n- \"${KOKORO_ARTIFACTS_DIR}/${KOKORO_RELNOTES}\"\n" }, { "change_type": "MODIFY", "old_path": "tools/tag_release.sh", "new_path": "tools/tag_release.sh", "diff": "# validate a provided release name, create a tag and push it. It must be\n# run manually when a release is created.\n-set -xeu\n+set -xeuo pipefail\n# Check arguments.\n-if [ \"$#\" -ne 3 ]; then\n+if [[ \"$#\" -ne 3 ]]; then\necho \"usage: $0 <commit|revid> <release.rc> <message-file>\"\nexit 1\nfi\n@@ -30,6 +30,12 @@ declare -r target_commit=\"$1\"\ndeclare -r release=\"$2\"\ndeclare -r message_file=\"$3\"\n+if [[ -z \"${target_commit}\" ]]; then\n+ echo \"error: <commit|revid> is empty.\"\n+fi\n+if [[ -z \"${release}\" ]]; then\n+ echo \"error: <release.rc> is empty.\"\n+fi\nif ! [[ -r \"${message_file}\" ]]; then\necho \"error: message file '${message_file}' is not readable.\"\nexit 1\n@@ -68,8 +74,9 @@ if ! [[ \"${release}\" =~ ^20[0-9]{6}\\.[0-9]+$ ]]; then\nexit 1\nfi\n-# Tag the given commit (annotated, to record the committer).\n+# Tag the given commit (annotated, to record the committer). Note that the tag\n+# here is applied as a force, in case the tag already exists and is the same.\n+# The push will fail in this case (because it is not forced).\ndeclare -r tag=\"release-${release}\"\n-(git tag -F \"${message_file}\" -a \"${tag}\" \"${commit}\" && \\\n- git push origin tag \"${tag}\") || \\\n- (git tag -d \"${tag}\" && false)\n+git tag -f -F \"${message_file}\" -a \"${tag}\" \"${commit}\" && \\\n+ git push origin tag \"${tag}\"\n" } ]
Go
Apache License 2.0
google/gvisor
Move to make for tag release workflow. This will make tag & release workflows idempotent. PiperOrigin-RevId: 314154888
259,992
01.06.2020 11:43:05
25,200
16100d18cbe27f01e1f0c147f91a694518ddc160
Make gofer mount readonly when overlay is enabled No writes are expected to the underlying filesystem when using --overlay.
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -168,7 +168,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\n// Start with root mount, then add any other additional mount as needed.\nats := make([]p9.Attacher, 0, len(spec.Mounts)+1)\nap, err := fsgofer.NewAttachPoint(\"/\", fsgofer.Config{\n- ROMount: spec.Root.Readonly,\n+ ROMount: spec.Root.Readonly || conf.Overlay,\nPanicOnWrite: g.panicOnWrite,\n})\nif err != nil {\n@@ -181,7 +181,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})\nfor _, m := range spec.Mounts {\nif specutils.Is9PMount(m) {\ncfg := fsgofer.Config{\n- ROMount: isReadonlyMount(m.Options),\n+ ROMount: isReadonlyMount(m.Options) || conf.Overlay,\nPanicOnWrite: g.panicOnWrite,\nHostUDS: conf.FSGoferHostUDS,\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Make gofer mount readonly when overlay is enabled. No writes are expected to the underlying filesystem when using --overlay. PiperOrigin-RevId: 314171457
259,860
01.06.2020 13:28:49
25,200
07c3b1dc5561f8ceb376cba1d79a41cd4b7a2533
Skip proc/pid/fd socket test for VFS1 only.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix.cc", "new_path": "test/syscalls/linux/socket_unix.cc", "diff": "@@ -256,10 +256,9 @@ TEST_P(UnixSocketPairTest, ShutdownWrite) {\n}\nTEST_P(UnixSocketPairTest, SocketReopenFromProcfs) {\n- // TODO(b/122310852): We should be returning ENXIO and NOT EIO.\n- // TODO(github.dev/issue/1624): This should be resolved in VFS2. Verify\n- // that this is the case and delete the SKIP_IF once we delete VFS1.\n- SKIP_IF(IsRunningOnGvisor());\n+ // TODO(gvisor.dev/issue/1624): In VFS1, we return EIO instead of ENXIO (see\n+ // b/122310852). Remove this skip once VFS1 is deleted.\n+ SKIP_IF(IsRunningWithVFS1());\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n// Opening a socket pair via /proc/self/fd/X is a ENXIO.\n" } ]
Go
Apache License 2.0
google/gvisor
Skip proc/pid/fd socket test for VFS1 only. PiperOrigin-RevId: 314192359
259,860
01.06.2020 13:29:17
25,200
35a3f462d9ccc5237f0200fcbeafaebb110b5134
Fix inotify test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/inotify.cc", "new_path": "test/syscalls/linux/inotify.cc", "diff": "@@ -593,12 +593,12 @@ TEST(Inotify, SizeZeroReadWriteGeneratesNothing) {\nTEST(Inotify, FailedFileCreationGeneratesNoEvents) {\nconst TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const std::string dir_path = dir.path();\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n- ASSERT_NO_ERRNO_AND_VALUE(\n- InotifyAddWatch(fd.get(), dir.path(), IN_ALL_EVENTS));\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(fd.get(), dir_path, IN_ALL_EVENTS));\n- const char* p = dir.path().c_str();\n+ const char* p = dir_path.c_str();\nASSERT_THAT(mkdir(p, 0777), SyscallFails());\nASSERT_THAT(mknod(p, S_IFIFO, 0777), SyscallFails());\nASSERT_THAT(symlink(p, p), SyscallFails());\n" } ]
Go
Apache License 2.0
google/gvisor
Fix inotify test. PiperOrigin-RevId: 314192441
259,885
01.06.2020 15:31:59
25,200
3a987160aa09f814a8459ed3f6192ce741b701a3
Handle gofer blocking opens of host named pipes in VFS2. Using tee instead of read to detect when a O_RDONLY|O_NONBLOCK pipe FD has a writer circumvents the problem of what to do with the byte read from the pipe, avoiding much of the complexity of the fdpipe package.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/BUILD", "new_path": "pkg/sentry/fsimpl/gofer/BUILD", "diff": "@@ -35,6 +35,7 @@ go_library(\n\"fstree.go\",\n\"gofer.go\",\n\"handle.go\",\n+ \"host_named_pipe.go\",\n\"p9file.go\",\n\"regular_file.go\",\n\"socket.go\",\n@@ -47,6 +48,7 @@ go_library(\n\"//pkg/abi/linux\",\n\"//pkg/context\",\n\"//pkg/fd\",\n+ \"//pkg/fdnotifier\",\n\"//pkg/fspath\",\n\"//pkg/log\",\n\"//pkg/p9\",\n@@ -71,6 +73,7 @@ go_library(\n\"//pkg/unet\",\n\"//pkg/usermem\",\n\"//pkg/waiter\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -873,19 +873,37 @@ func (d *dentry) openSpecialFileLocked(ctx context.Context, mnt *vfs.Mount, opts\nif opts.Flags&linux.O_DIRECT != 0 {\nreturn nil, syserror.EINVAL\n}\n- h, err := openHandle(ctx, d.file, ats&vfs.MayRead != 0, ats&vfs.MayWrite != 0, opts.Flags&linux.O_TRUNC != 0)\n+ // We assume that the server silently inserts O_NONBLOCK in the open flags\n+ // for all named pipes (because all existing gofers do this).\n+ //\n+ // NOTE(b/133875563): This makes named pipe opens racy, because the\n+ // mechanisms for translating nonblocking to blocking opens can only detect\n+ // the instantaneous presence of a peer holding the other end of the pipe\n+ // open, not whether the pipe was *previously* opened by a peer that has\n+ // since closed its end.\n+ isBlockingOpenOfNamedPipe := d.fileType() == linux.S_IFIFO && opts.Flags&linux.O_NONBLOCK == 0\n+retry:\n+ h, err := openHandle(ctx, d.file, ats.MayRead(), ats.MayWrite(), opts.Flags&linux.O_TRUNC != 0)\nif err != nil {\n+ if isBlockingOpenOfNamedPipe && ats == vfs.MayWrite && err == syserror.ENXIO {\n+ // An attempt to open a named pipe with O_WRONLY|O_NONBLOCK fails\n+ // with ENXIO if opening the same named pipe with O_WRONLY would\n+ // block because there are no readers of the pipe.\n+ if err := sleepBetweenNamedPipeOpenChecks(ctx); err != nil {\nreturn nil, err\n}\n- seekable := d.fileType() == linux.S_IFREG\n- fd := &specialFileFD{\n- handle: h,\n- seekable: seekable,\n+ goto retry\n}\n- if err := fd.vfsfd.Init(fd, opts.Flags, mnt, &d.vfsd, &vfs.FileDescriptionOptions{\n- DenyPRead: !seekable,\n- DenyPWrite: !seekable,\n- }); err != nil {\n+ return nil, err\n+ }\n+ if isBlockingOpenOfNamedPipe && ats == vfs.MayRead && h.fd >= 0 {\n+ if err := blockUntilNonblockingPipeHasWriter(ctx, h.fd); err != nil {\n+ h.close(ctx)\n+ return nil, err\n+ }\n+ }\n+ fd, err := newSpecialFileFD(h, mnt, d, opts.Flags)\n+ if err != nil {\nh.close(ctx)\nreturn nil, err\n}\n@@ -981,22 +999,16 @@ func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.Resolving\n}\nchildVFSFD = &fd.vfsfd\n} else {\n- seekable := child.fileType() == linux.S_IFREG\n- fd := &specialFileFD{\n- handle: handle{\n+ h := handle{\nfile: openFile,\nfd: -1,\n- },\n- seekable: seekable,\n}\nif fdobj != nil {\n- fd.handle.fd = int32(fdobj.Release())\n+ h.fd = int32(fdobj.Release())\n}\n- if err := fd.vfsfd.Init(fd, opts.Flags, mnt, &child.vfsd, &vfs.FileDescriptionOptions{\n- DenyPRead: !seekable,\n- DenyPWrite: !seekable,\n- }); err != nil {\n- fd.handle.close(ctx)\n+ fd, err := newSpecialFileFD(h, mnt, child, opts.Flags)\n+ if err != nil {\n+ h.close(ctx)\nreturn nil, err\n}\nchildVFSFD = &fd.vfsfd\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/sentry/fsimpl/gofer/host_named_pipe.go", "diff": "+// Copyright 2019 The 
gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package gofer\n+\n+import (\n+ \"fmt\"\n+ \"sync\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/syserror\"\n+)\n+\n+// Global pipe used by blockUntilNonblockingPipeHasWriter since we can't create\n+// pipes after sentry initialization due to syscall filters.\n+var (\n+ tempPipeMu sync.Mutex\n+ tempPipeReadFD int\n+ tempPipeWriteFD int\n+ tempPipeBuf [1]byte\n+)\n+\n+func init() {\n+ var pipeFDs [2]int\n+ if err := unix.Pipe(pipeFDs[:]); err != nil {\n+ panic(fmt.Sprintf(\"failed to create pipe for gofer.blockUntilNonblockingPipeHasWriter: %v\", err))\n+ }\n+ tempPipeReadFD = pipeFDs[0]\n+ tempPipeWriteFD = pipeFDs[1]\n+}\n+\n+func blockUntilNonblockingPipeHasWriter(ctx context.Context, fd int32) error {\n+ for {\n+ ok, err := nonblockingPipeHasWriter(fd)\n+ if err != nil {\n+ return err\n+ }\n+ if ok {\n+ return nil\n+ }\n+ if err := sleepBetweenNamedPipeOpenChecks(ctx); err != nil {\n+ return err\n+ }\n+ }\n+}\n+\n+func nonblockingPipeHasWriter(fd int32) (bool, error) {\n+ tempPipeMu.Lock()\n+ defer tempPipeMu.Unlock()\n+ // Copy 1 byte from fd into the temporary pipe.\n+ n, err := unix.Tee(int(fd), tempPipeWriteFD, 1, unix.SPLICE_F_NONBLOCK)\n+ if err == syserror.EAGAIN {\n+ // The pipe represented by fd is empty, but has a writer.\n+ return true, nil\n+ }\n+ if err != nil {\n+ return false, err\n+ }\n+ if n == 0 {\n+ // The pipe represented by fd is empty and has no writer.\n+ return false, nil\n+ }\n+ // The pipe represented by fd is non-empty, so it either has, or has\n+ // previously had, a writer. Remove the byte copied to the temporary pipe\n+ // before returning.\n+ if n, err := unix.Read(tempPipeReadFD, tempPipeBuf[:]); err != nil || n != 1 {\n+ panic(fmt.Sprintf(\"failed to drain pipe for gofer.blockUntilNonblockingPipeHasWriter: got (%d, %v), wanted (1, nil)\", n, err))\n+ }\n+ return true, nil\n+}\n+\n+func sleepBetweenNamedPipeOpenChecks(ctx context.Context) error {\n+ t := time.NewTimer(100 * time.Millisecond)\n+ defer t.Stop()\n+ cancel := ctx.SleepStart()\n+ select {\n+ case <-t.C:\n+ ctx.SleepFinish(true)\n+ return nil\n+ case <-cancel:\n+ ctx.SleepFinish(false)\n+ return syserror.ErrInterrupted\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/special_file.go", "new_path": "pkg/sentry/fsimpl/gofer/special_file.go", "diff": "@@ -19,17 +19,18 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/context\"\n+ \"gvisor.dev/gvisor/pkg/fdnotifier\"\n\"gvisor.dev/gvisor/pkg/safemem\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/usermem\"\n+ \"gvisor.dev/gvisor/pkg/waiter\"\n)\n-// specialFileFD implements vfs.FileDescriptionImpl for files other than\n-// regular files, directories, and symlinks: pipes, sockets, etc. It is also\n-// used for regular files when filesystemOptions.specialRegularFiles is in\n-// effect. 
specialFileFD differs from regularFileFD by using per-FD handles\n-// instead of shared per-dentry handles, and never buffering I/O.\n+// specialFileFD implements vfs.FileDescriptionImpl for pipes, sockets, device\n+// special files, and (when filesystemOptions.specialRegularFiles is in effect)\n+// regular files. specialFileFD differs from regularFileFD by using per-FD\n+// handles instead of shared per-dentry handles, and never buffering I/O.\ntype specialFileFD struct {\nfileDescription\n@@ -40,13 +41,47 @@ type specialFileFD struct {\n// file offset is significant, i.e. a regular file. seekable is immutable.\nseekable bool\n+ // mayBlock is true if this file description represents a file for which\n+ // queue may send I/O readiness events. mayBlock is immutable.\n+ mayBlock bool\n+ queue waiter.Queue\n+\n// If seekable is true, off is the file offset. off is protected by mu.\nmu sync.Mutex\noff int64\n}\n+func newSpecialFileFD(h handle, mnt *vfs.Mount, d *dentry, flags uint32) (*specialFileFD, error) {\n+ ftype := d.fileType()\n+ seekable := ftype == linux.S_IFREG\n+ mayBlock := ftype == linux.S_IFIFO || ftype == linux.S_IFSOCK\n+ fd := &specialFileFD{\n+ handle: h,\n+ seekable: seekable,\n+ mayBlock: mayBlock,\n+ }\n+ if mayBlock && h.fd >= 0 {\n+ if err := fdnotifier.AddFD(h.fd, &fd.queue); err != nil {\n+ return nil, err\n+ }\n+ }\n+ if err := fd.vfsfd.Init(fd, flags, mnt, &d.vfsd, &vfs.FileDescriptionOptions{\n+ DenyPRead: !seekable,\n+ DenyPWrite: !seekable,\n+ }); err != nil {\n+ if mayBlock && h.fd >= 0 {\n+ fdnotifier.RemoveFD(h.fd)\n+ }\n+ return nil, err\n+ }\n+ return fd, nil\n+}\n+\n// Release implements vfs.FileDescriptionImpl.Release.\nfunc (fd *specialFileFD) Release() {\n+ if fd.mayBlock && fd.handle.fd >= 0 {\n+ fdnotifier.RemoveFD(fd.handle.fd)\n+ }\nfd.handle.close(context.Background())\nfs := fd.vfsfd.Mount().Filesystem().Impl().(*filesystem)\nfs.syncMu.Lock()\n@@ -62,6 +97,32 @@ func (fd *specialFileFD) OnClose(ctx context.Context) error {\nreturn fd.handle.file.flush(ctx)\n}\n+// Readiness implements waiter.Waitable.Readiness.\n+func (fd *specialFileFD) Readiness(mask waiter.EventMask) waiter.EventMask {\n+ if fd.mayBlock {\n+ return fdnotifier.NonBlockingPoll(fd.handle.fd, mask)\n+ }\n+ return fd.fileDescription.Readiness(mask)\n+}\n+\n+// EventRegister implements waiter.Waitable.EventRegister.\n+func (fd *specialFileFD) EventRegister(e *waiter.Entry, mask waiter.EventMask) {\n+ if fd.mayBlock {\n+ fd.queue.EventRegister(e, mask)\n+ return\n+ }\n+ fd.fileDescription.EventRegister(e, mask)\n+}\n+\n+// EventUnregister implements waiter.Waitable.EventUnregister.\n+func (fd *specialFileFD) EventUnregister(e *waiter.Entry) {\n+ if fd.mayBlock {\n+ fd.queue.EventUnregister(e)\n+ return\n+ }\n+ fd.fileDescription.EventUnregister(e)\n+}\n+\n// PRead implements vfs.FileDescriptionImpl.PRead.\nfunc (fd *specialFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {\nif fd.seekable && offset < 0 {\n@@ -81,6 +142,9 @@ func (fd *specialFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs\n}\nbuf := make([]byte, dst.NumBytes())\nn, err := fd.handle.readToBlocksAt(ctx, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)), uint64(offset))\n+ if err == syserror.EAGAIN {\n+ err = syserror.ErrWouldBlock\n+ }\nif n == 0 {\nreturn 0, err\n}\n@@ -130,6 +194,9 @@ func (fd *specialFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off\nreturn 0, err\n}\nn, err := fd.handle.writeFromBlocksAt(ctx, 
safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)), uint64(offset))\n+ if err == syserror.EAGAIN {\n+ err = syserror.ErrWouldBlock\n+ }\nreturn int64(n), err\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/filter/config.go", "new_path": "runsc/boot/filter/config.go", "diff": "@@ -288,6 +288,14 @@ var allowedSyscalls = seccomp.SyscallRules{\nsyscall.SYS_SIGALTSTACK: {},\nunix.SYS_STATX: {},\nsyscall.SYS_SYNC_FILE_RANGE: {},\n+ syscall.SYS_TEE: []seccomp.Rule{\n+ {\n+ seccomp.AllowAny{},\n+ seccomp.AllowAny{},\n+ seccomp.AllowValue(1), /* len */\n+ seccomp.AllowValue(unix.SPLICE_F_NONBLOCK), /* flags */\n+ },\n+ },\nsyscall.SYS_TGKILL: []seccomp.Rule{\n{\nseccomp.AllowValue(uint64(os.Getpid())),\n" } ]
Go
Apache License 2.0
google/gvisor
Handle gofer blocking opens of host named pipes in VFS2. Using tee instead of read to detect when a O_RDONLY|O_NONBLOCK pipe FD has a writer circumvents the problem of what to do with the byte read from the pipe, avoiding much of the complexity of the fdpipe package. PiperOrigin-RevId: 314216146
259,885
01.06.2020 18:11:35
25,200
49a9b78f74fca28cc9312dfb29ccbe70e3b5fcc3
Fix VFS2 gofer open(O_CREAT) reference leak. gofer.filesystem.createAndOpenChildLocked() doesn't need to take a reference on the new dentry since vfs.FileDescription.Init() will do so.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -760,7 +760,7 @@ afterTrailingSymlink:\nparent.dirMu.Unlock()\nreturn nil, syserror.EPERM\n}\n- fd, err := parent.createAndOpenChildLocked(ctx, rp, &opts)\n+ fd, err := parent.createAndOpenChildLocked(ctx, rp, &opts, &ds)\nparent.dirMu.Unlock()\nreturn fd, err\n}\n@@ -912,7 +912,7 @@ retry:\n// Preconditions: d.fs.renameMu must be locked. d.dirMu must be locked.\n// !d.isSynthetic().\n-func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\n+func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions, ds **[]*dentry) (*vfs.FileDescription, error) {\nif err := d.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {\nreturn nil, err\n}\n@@ -965,6 +965,7 @@ func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.Resolving\n}\nreturn nil, err\n}\n+ *ds = appendDentry(*ds, child)\n// Incorporate the fid that was opened by lcreate.\nuseRegularFileFD := child.fileType() == linux.S_IFREG && !d.fs.opts.regularFilesUseSpecialFileFD\nif useRegularFileFD {\n@@ -977,10 +978,6 @@ func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.Resolving\nchild.handleWritable = vfs.MayWriteFileWithOpenFlags(opts.Flags)\nchild.handleMu.Unlock()\n}\n- // Take a reference on the new dentry to be held by the new file\n- // description. (This reference also means that the new dentry is not\n- // eligible for caching yet, so we don't need to append to a dentry slice.)\n- child.refs = 1\n// Insert the dentry into the tree.\nd.cacheNewChildLocked(child, name)\nif d.cachedMetadataAuthoritative() {\n" } ]
Go
Apache License 2.0
google/gvisor
Fix VFS2 gofer open(O_CREAT) reference leak. gofer.filesystem.createAndOpenChildLocked() doesn't need to take a reference on the new dentry since vfs.FileDescription.Init() will do so. PiperOrigin-RevId: 314242127
259,860
01.06.2020 18:30:09
25,200
050d8e6e331e01d732471e4641dc51346e7a7d3b
Add inotify events for extended attributes and splice. Splice, setxattr and removexattr should generate events. Note that VFS2 already generates events for extended attributes. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_splice.go", "new_path": "pkg/sentry/syscalls/linux/sys_splice.go", "diff": "@@ -80,6 +80,12 @@ func doSplice(t *kernel.Task, outFile, inFile *fs.File, opts fs.SpliceOpts, nonB\n}\n}\n+ if total > 0 {\n+ // On Linux, inotify behavior is not very consistent with splice(2). We try\n+ // our best to emulate Linux for very basic calls to splice, where for some\n+ // reason, events are generated for output files, but not input files.\n+ outFile.Dirent.InotifyEvent(linux.IN_MODIFY, 0)\n+ }\nreturn total, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_xattr.go", "new_path": "pkg/sentry/syscalls/linux/sys_xattr.go", "diff": "@@ -207,7 +207,11 @@ func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, si\nreturn syserror.EOPNOTSUPP\n}\n- return d.Inode.SetXattr(t, d, name, value, flags)\n+ if err := d.Inode.SetXattr(t, d, name, value, flags); err != nil {\n+ return err\n+ }\n+ d.InotifyEvent(linux.IN_ATTRIB, 0)\n+ return nil\n}\nfunc copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) {\n@@ -418,7 +422,11 @@ func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr usermem.Addr) error {\nreturn syserror.EOPNOTSUPP\n}\n- return d.Inode.RemoveXattr(t, d, name)\n+ if err := d.Inode.RemoveXattr(t, d, name); err != nil {\n+ return err\n+ }\n+ d.InotifyEvent(linux.IN_ATTRIB, 0)\n+ return nil\n}\n// LINT.ThenChange(vfs2/xattr.go)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/splice.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/splice.go", "diff": "@@ -187,6 +187,11 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal\nif n == 0 {\nreturn 0, nil, err\n}\n+\n+ // On Linux, inotify behavior is not very consistent with splice(2). We try\n+ // our best to emulate Linux for very basic calls to splice, where for some\n+ // reason, events are generated for output files, but not input files.\n+ outFile.Dentry().InotifyWithParent(linux.IN_MODIFY, 0, vfs.PathEvent)\nreturn uintptr(n), nil, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/inotify.cc", "new_path": "test/syscalls/linux/inotify.cc", "diff": "#include <sys/inotify.h>\n#include <sys/ioctl.h>\n#include <sys/time.h>\n+#include <sys/xattr.h>\n#include <atomic>\n#include <list>\n@@ -1655,9 +1656,44 @@ TEST(Inotify, EpollNoDeadlock) {\n}\n}\n-TEST(Inotify, SpliceEvent) {\n- // TODO(gvisor.dev/issue/138): Implement splice in VFS2.\n- SKIP_IF(IsRunningOnGvisor() && !IsRunningWithVFS1());\n+// On Linux, inotify behavior is not very consistent with splice(2). 
We try our\n+// best to emulate Linux for very basic calls to splice.\n+TEST(Inotify, SpliceOnWatchTarget) {\n+ int pipes[2];\n+ ASSERT_THAT(pipe2(pipes, O_NONBLOCK), SyscallSucceeds());\n+\n+ const TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith(\n+ dir.path(), \"some content\", TempPath::kDefaultFileMode));\n+\n+ const FileDescriptor fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\n+ const int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(\n+ InotifyAddWatch(inotify_fd.get(), dir.path(), IN_ALL_EVENTS));\n+ const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(\n+ InotifyAddWatch(inotify_fd.get(), file.path(), IN_ALL_EVENTS));\n+\n+ EXPECT_THAT(splice(fd.get(), nullptr, pipes[1], nullptr, 1, /*flags=*/0),\n+ SyscallSucceedsWithValue(1));\n+\n+ // Surprisingly, events are not generated in Linux if we read from a file.\n+ std::vector<Event> events =\n+ ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ ASSERT_THAT(events, Are({}));\n+\n+ EXPECT_THAT(splice(pipes[0], nullptr, fd.get(), nullptr, 1, /*flags=*/0),\n+ SyscallSucceedsWithValue(1));\n+\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ ASSERT_THAT(events, Are({\n+ Event(IN_MODIFY, dir_wd, Basename(file.path())),\n+ Event(IN_MODIFY, file_wd),\n+ }));\n+}\n+\n+TEST(Inotify, SpliceOnInotifyFD) {\nint pipes[2];\nASSERT_THAT(pipe2(pipes, O_NONBLOCK), SyscallSucceeds());\n@@ -1719,6 +1755,58 @@ TEST(Inotify, LinkOnOtherParent) {\nEXPECT_THAT(events, Are({}));\n}\n+TEST(Inotify, Xattr) {\n+ // TODO(gvisor.dev/issue/1636): Support extended attributes in runsc gofer.\n+ SKIP_IF(IsRunningOnGvisor());\n+\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const std::string path = file.path();\n+ const FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(path, O_RDWR));\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+ const int wd = ASSERT_NO_ERRNO_AND_VALUE(\n+ InotifyAddWatch(inotify_fd.get(), path, IN_ALL_EVENTS));\n+\n+ const char* cpath = path.c_str();\n+ const char* name = \"user.test\";\n+ int val = 123;\n+ ASSERT_THAT(setxattr(cpath, name, &val, sizeof(val), /*flags=*/0),\n+ SyscallSucceeds());\n+ std::vector<Event> events =\n+ ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));\n+\n+ ASSERT_THAT(getxattr(cpath, name, &val, sizeof(val)), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({}));\n+\n+ char list[100];\n+ ASSERT_THAT(listxattr(cpath, list, sizeof(list)), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({}));\n+\n+ ASSERT_THAT(removexattr(cpath, name), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));\n+\n+ ASSERT_THAT(fsetxattr(fd.get(), name, &val, sizeof(val), /*flags=*/0),\n+ SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));\n+\n+ ASSERT_THAT(fgetxattr(fd.get(), name, &val, sizeof(val)), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({}));\n+\n+ ASSERT_THAT(flistxattr(fd.get(), list, 
sizeof(list)), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({}));\n+\n+ ASSERT_THAT(fremovexattr(fd.get(), name), SyscallSucceeds());\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd)}));\n+}\n+\nTEST(Inotify, Exec) {\nconst TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nconst TempPath bin = ASSERT_NO_ERRNO_AND_VALUE(\n" } ]
Go
Apache License 2.0
google/gvisor
Add inotify events for extended attributes and splice. Splice, setxattr and removexattr should generate events. Note that VFS2 already generates events for extended attributes. Updates #1479. PiperOrigin-RevId: 314244261
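As a rough userspace illustration of the behavior this commit adds (this is not code from the repository; the file name and attribute name are made up, and the filesystem must support user.* xattrs), a watcher should observe IN_ATTRIB once an extended attribute is set:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Assumed scratch file in the current directory.
	const path = "xattr-demo.txt"
	if err := os.WriteFile(path, []byte("hello"), 0o644); err != nil {
		log.Fatal(err)
	}
	defer os.Remove(path)

	ifd, err := unix.InotifyInit1(0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(ifd)
	if _, err := unix.InotifyAddWatch(ifd, path, unix.IN_ATTRIB); err != nil {
		log.Fatal(err)
	}

	// Setting an extended attribute should queue an IN_ATTRIB event.
	if err := unix.Setxattr(path, "user.demo", []byte("1"), 0); err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 4096)
	n, err := unix.Read(ifd, buf)
	if err != nil {
		log.Fatal(err)
	}
	// Decode just the fixed-size header of the first event.
	var ev unix.InotifyEvent
	if err := binary.Read(bytes.NewReader(buf[:n]), binary.LittleEndian, &ev); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("mask=%#x IN_ATTRIB=%v\n", ev.Mask, ev.Mask&unix.IN_ATTRIB != 0)
}
```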
259,992
01.06.2020 21:30:28
25,200
ca5912d13c63dcaff72bf6eb6d49bde8fc4e3f73
More runsc changes for VFS2 Add /tmp handling Apply mount options Enable more container_test tests Forward signals to child process when test respawns process to run as root inside namespace. Updates
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -217,6 +217,7 @@ dev: ## Installs a set of local runtimes. Requires sudo.\n@$(MAKE) configure RUNTIME=\"$(RUNTIME)\" ARGS=\"--net-raw\"\n@$(MAKE) configure RUNTIME=\"$(RUNTIME)-d\" ARGS=\"--net-raw --debug --strace --log-packets\"\n@$(MAKE) configure RUNTIME=\"$(RUNTIME)-p\" ARGS=\"--net-raw --profile\"\n+ @$(MAKE) configure RUNTIME=\"$(RUNTIME)-vfs2-d\" ARGS=\"--net-raw --debug --strace --log-packets --vfs2\"\n@sudo systemctl restart docker\n.PHONY: dev\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/fs.go", "new_path": "runsc/boot/fs.go", "diff": "@@ -63,7 +63,7 @@ const (\n)\n// tmpfs has some extra supported options that we must pass through.\n-var tmpfsAllowedOptions = []string{\"mode\", \"uid\", \"gid\"}\n+var tmpfsAllowedData = []string{\"mode\", \"uid\", \"gid\"}\nfunc addOverlay(ctx context.Context, conf *Config, lower *fs.Inode, name string, lowerFlags fs.MountSourceFlags) (*fs.Inode, error) {\n// Upper layer uses the same flags as lower, but it must be read-write.\n@@ -154,8 +154,8 @@ func compileMounts(spec *specs.Spec) []specs.Mount {\nreturn mounts\n}\n-// p9MountOptions creates a slice of options for a p9 mount.\n-func p9MountOptions(fd int, fa FileAccessType, vfs2 bool) []string {\n+// p9MountData creates a slice of p9 mount data.\n+func p9MountData(fd int, fa FileAccessType, vfs2 bool) []string {\nopts := []string{\n\"trans=fd\",\n\"rfdno=\" + strconv.Itoa(fd),\n@@ -235,7 +235,7 @@ func isSupportedMountFlag(fstype, opt string) bool {\nreturn true\n}\nif fstype == tmpfsvfs2.Name {\n- ok, err := parseMountOption(opt, tmpfsAllowedOptions...)\n+ ok, err := parseMountOption(opt, tmpfsAllowedData...)\nreturn ok && err == nil\n}\nreturn false\n@@ -716,7 +716,7 @@ func (c *containerMounter) createRootMount(ctx context.Context, conf *Config) (*\nfd := c.fds.remove()\nlog.Infof(\"Mounting root over 9P, ioFD: %d\", fd)\np9FS := mustFindFilesystem(\"9p\")\n- opts := p9MountOptions(fd, conf.FileAccess, false /* vfs2 */)\n+ opts := p9MountData(fd, conf.FileAccess, false /* vfs2 */)\nif conf.OverlayfsStaleRead {\n// We can't check for overlayfs here because sandbox is chroot'ed and gofer\n@@ -770,7 +770,7 @@ func (c *containerMounter) getMountNameAndOptions(conf *Config, m specs.Mount) (\nfsName = m.Type\nvar err error\n- opts, err = parseAndFilterOptions(m.Options, tmpfsAllowedOptions...)\n+ opts, err = parseAndFilterOptions(m.Options, tmpfsAllowedData...)\nif err != nil {\nreturn \"\", nil, false, err\n}\n@@ -778,7 +778,7 @@ func (c *containerMounter) getMountNameAndOptions(conf *Config, m specs.Mount) (\ncase bind:\nfd := c.fds.remove()\nfsName = gofervfs2.Name\n- opts = p9MountOptions(fd, c.getMountAccessType(m), conf.VFS2)\n+ opts = p9MountData(fd, c.getMountAccessType(m), conf.VFS2)\n// If configured, add overlay to all writable mounts.\nuseOverlay = conf.Overlay && !mountFlags(m.Options).ReadOnly\n@@ -931,7 +931,7 @@ func (c *containerMounter) createRestoreEnvironment(conf *Config) (*fs.RestoreEn\n// Add root mount.\nfd := c.fds.remove()\n- opts := p9MountOptions(fd, conf.FileAccess, false /* vfs2 */)\n+ opts := p9MountData(fd, conf.FileAccess, false /* vfs2 */)\nmf := fs.MountSourceFlags{}\nif c.root.Readonly || conf.Overlay {\n@@ -1019,7 +1019,7 @@ func (c *containerMounter) mountTmp(ctx context.Context, conf *Config, mns *fs.M\nDestination: \"/tmp\",\n// Sticky bit is added to prevent accidental deletion of files from\n// another user. 
This is normally done for /tmp.\n- Options: []string{\"mode=1777\"},\n+ Options: []string{\"mode=01777\"},\n}\nreturn c.mountSubmount(ctx, conf, mns, root, tmpMount)\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/vfs.go", "new_path": "runsc/boot/vfs.go", "diff": "@@ -136,7 +136,7 @@ func (c *containerMounter) setupVFS2(ctx context.Context, conf *Config, procArgs\nfunc (c *containerMounter) createMountNamespaceVFS2(ctx context.Context, conf *Config, creds *auth.Credentials) (*vfs.MountNamespace, error) {\nfd := c.fds.remove()\n- opts := strings.Join(p9MountOptions(fd, conf.FileAccess, true /* vfs2 */), \",\")\n+ opts := strings.Join(p9MountData(fd, conf.FileAccess, true /* vfs2 */), \",\")\nlog.Infof(\"Mounting root over 9P, ioFD: %d\", fd)\nmns, err := c.k.VFS().NewMountNamespace(ctx, creds, \"\", gofer.Name, &vfs.GetFilesystemOptions{Data: opts})\n@@ -160,8 +160,9 @@ func (c *containerMounter) mountSubmountsVFS2(ctx context.Context, conf *Config,\n}\n}\n- // TODO(gvisor.dev/issue/1487): implement mountTmp from fs.go.\n-\n+ if err := c.mountTmpVFS2(ctx, conf, creds, mns); err != nil {\n+ return fmt.Errorf(`mount submount \"\\tmp\": %w`, err)\n+ }\nreturn nil\n}\n@@ -199,8 +200,6 @@ func (c *containerMounter) prepareMountsVFS2() ([]mountAndFD, error) {\nreturn mounts, nil\n}\n-// TODO(gvisor.dev/issue/1487): Implement submount options similar to the VFS1\n-// version.\nfunc (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *Config, mns *vfs.MountNamespace, creds *auth.Credentials, submount *mountAndFD) error {\nroot := mns.Root()\ndefer root.DecRef()\n@@ -209,12 +208,11 @@ func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *Config,\nStart: root,\nPath: fspath.Parse(submount.Destination),\n}\n-\n- fsName, options, useOverlay, err := c.getMountNameAndOptionsVFS2(conf, submount)\n+ fsName, opts, err := c.getMountNameAndOptionsVFS2(conf, submount)\nif err != nil {\nreturn fmt.Errorf(\"mountOptions failed: %w\", err)\n}\n- if fsName == \"\" {\n+ if len(fsName) == 0 {\n// Filesystem is not supported (e.g. 
cgroup), just skip it.\nreturn nil\n}\n@@ -222,17 +220,6 @@ func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *Config,\nif err := c.makeSyntheticMount(ctx, submount.Destination, root, creds); err != nil {\nreturn err\n}\n-\n- opts := &vfs.MountOptions{\n- GetFilesystemOptions: vfs.GetFilesystemOptions{\n- Data: strings.Join(options, \",\"),\n- },\n- InternalMount: true,\n- }\n-\n- // All writes go to upper, be paranoid and make lower readonly.\n- opts.ReadOnly = useOverlay\n-\nif err := c.k.VFS().MountAt(ctx, creds, \"\", target, fsName, opts); err != nil {\nreturn fmt.Errorf(\"failed to mount %q (type: %s): %w, opts: %v\", submount.Destination, submount.Type, err, opts)\n}\n@@ -242,13 +229,13 @@ func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *Config,\n// getMountNameAndOptionsVFS2 retrieves the fsName, opts, and useOverlay values\n// used for mounts.\n-func (c *containerMounter) getMountNameAndOptionsVFS2(conf *Config, m *mountAndFD) (string, []string, bool, error) {\n+func (c *containerMounter) getMountNameAndOptionsVFS2(conf *Config, m *mountAndFD) (string, *vfs.MountOptions, error) {\nvar (\nfsName string\n- opts []string\n- useOverlay bool\n+ data []string\n)\n+ // Find filesystem name and FS specific data field.\nswitch m.Type {\ncase devpts.Name, devtmpfs.Name, proc.Name, sys.Name:\nfsName = m.Type\n@@ -258,21 +245,46 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *Config, m *mountAndF\nfsName = m.Type\nvar err error\n- opts, err = parseAndFilterOptions(m.Options, tmpfsAllowedOptions...)\n+ data, err = parseAndFilterOptions(m.Options, tmpfsAllowedData...)\nif err != nil {\n- return \"\", nil, false, err\n+ return \"\", nil, err\n}\ncase bind:\nfsName = gofer.Name\n- opts = p9MountOptions(m.fd, c.getMountAccessType(m.Mount), true /* vfs2 */)\n- // If configured, add overlay to all writable mounts.\n- useOverlay = conf.Overlay && !mountFlags(m.Options).ReadOnly\n+ data = p9MountData(m.fd, c.getMountAccessType(m.Mount), true /* vfs2 */)\ndefault:\nlog.Warningf(\"ignoring unknown filesystem type %q\", m.Type)\n}\n- return fsName, opts, useOverlay, nil\n+\n+ opts := &vfs.MountOptions{\n+ GetFilesystemOptions: vfs.GetFilesystemOptions{\n+ Data: strings.Join(data, \",\"),\n+ },\n+ InternalMount: true,\n+ }\n+\n+ for _, o := range m.Options {\n+ switch o {\n+ case \"rw\":\n+ opts.ReadOnly = false\n+ case \"ro\":\n+ opts.ReadOnly = true\n+ case \"noatime\":\n+ // TODO(gvisor.dev/issue/1193): Implement MS_NOATIME.\n+ case \"noexec\":\n+ opts.Flags.NoExec = true\n+ default:\n+ log.Warningf(\"ignoring unknown mount option %q\", o)\n+ }\n+ }\n+\n+ if conf.Overlay {\n+ // All writes go to upper, be paranoid and make lower readonly.\n+ opts.ReadOnly = true\n+ }\n+ return fsName, opts, nil\n}\nfunc (c *containerMounter) makeSyntheticMount(ctx context.Context, currentPath string, root vfs.VirtualDentry, creds *auth.Credentials) error {\n@@ -301,3 +313,63 @@ func (c *containerMounter) makeSyntheticMount(ctx context.Context, currentPath s\n}\nreturn nil\n}\n+\n+// mountTmpVFS2 mounts an internal tmpfs at '/tmp' if it's safe to do so.\n+// Technically we don't have to mount tmpfs at /tmp, as we could just rely on\n+// the host /tmp, but this is a nice optimization, and fixes some apps that call\n+// mknod in /tmp. It's unsafe to mount tmpfs if:\n+// 1. /tmp is mounted explicitly: we should not override user's wish\n+// 2. 
/tmp is not empty: mounting tmpfs would hide existing files in /tmp\n+//\n+// Note that when there are submounts inside of '/tmp', directories for the\n+// mount points must be present, making '/tmp' not empty anymore.\n+func (c *containerMounter) mountTmpVFS2(ctx context.Context, conf *Config, creds *auth.Credentials, mns *vfs.MountNamespace) error {\n+ for _, m := range c.mounts {\n+ // m.Destination has been cleaned, so it's to use equality here.\n+ if m.Destination == \"/tmp\" {\n+ log.Debugf(`Explict \"/tmp\" mount found, skipping internal tmpfs, mount: %+v`, m)\n+ return nil\n+ }\n+ }\n+\n+ root := mns.Root()\n+ defer root.DecRef()\n+ pop := vfs.PathOperation{\n+ Root: root,\n+ Start: root,\n+ Path: fspath.Parse(\"/tmp\"),\n+ }\n+ // TODO(gvisor.dev/issue/2782): Use O_PATH when available.\n+ statx, err := c.k.VFS().StatAt(ctx, creds, &pop, &vfs.StatOptions{})\n+ switch err {\n+ case nil:\n+ // Found '/tmp' in filesystem, check if it's empty.\n+ if linux.FileMode(statx.Mode).FileType() != linux.ModeDirectory {\n+ // Not a dir?! Leave it be.\n+ return nil\n+ }\n+ if statx.Nlink > 2 {\n+ // If more than \".\" and \"..\" is found, skip internal tmpfs to prevent\n+ // hiding existing files.\n+ log.Infof(`Skipping internal tmpfs mount for \"/tmp\" because it's not empty`)\n+ return nil\n+ }\n+ log.Infof(`Mounting internal tmpfs on top of empty \"/tmp\"`)\n+ fallthrough\n+\n+ case syserror.ENOENT:\n+ // No '/tmp' found (or fallthrough from above). It's safe to mount internal\n+ // tmpfs.\n+ tmpMount := specs.Mount{\n+ Type: tmpfs.Name,\n+ Destination: \"/tmp\",\n+ // Sticky bit is added to prevent accidental deletion of files from\n+ // another user. This is normally done for /tmp.\n+ Options: []string{\"mode=01777\"},\n+ }\n+ return c.mountSubmountVFS2(ctx, conf, mns, creds, &mountAndFD{Mount: tmpMount})\n+\n+ default:\n+ return fmt.Errorf(`stating \"/tmp\" inside container: %w`, err)\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/BUILD", "new_path": "runsc/container/BUILD", "diff": "@@ -47,7 +47,7 @@ go_test(\n\"//test/cmd/test_app\",\n],\nlibrary = \":container\",\n- shard_count = 5,\n+ shard_count = 10,\ntags = [\n\"requires-kvm\",\n],\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/console_test.go", "new_path": "runsc/container/console_test.go", "diff": "@@ -119,7 +119,7 @@ func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {\n// Test that an pty FD is sent over the console socket if one is provided.\nfunc TestConsoleSocket(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configsWithVFS2(t, all...) 
{\nt.Run(name, func(t *testing.T) {\nspec := testutil.NewSpecWithArgs(\"true\")\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -256,8 +256,6 @@ var (\nfunc configs(t *testing.T, opts ...configOption) map[string]*boot.Config {\n// Always load the default config.\ncs := make(map[string]*boot.Config)\n- cs[\"default\"] = testutil.TestConfig(t)\n-\nfor _, o := range opts {\nswitch o {\ncase overlay:\n@@ -285,9 +283,16 @@ func configs(t *testing.T, opts ...configOption) map[string]*boot.Config {\nfunc configsWithVFS2(t *testing.T, opts ...configOption) map[string]*boot.Config {\nvfs1 := configs(t, opts...)\n- vfs2 := configs(t, opts...)\n- for key, value := range vfs2 {\n+ var optsVFS2 []configOption\n+ for _, opt := range opts {\n+ // TODO(gvisor.dev/issue/1487): Enable overlay tests.\n+ if opt != overlay {\n+ optsVFS2 = append(optsVFS2, opt)\n+ }\n+ }\n+\n+ for key, value := range configs(t, optsVFS2...) {\nvalue.VFS2 = true\nvfs1[key+\"VFS2\"] = value\n}\n@@ -603,7 +608,7 @@ func doAppExitStatus(t *testing.T, vfs2 bool) {\n// TestExec verifies that a container can exec a new program.\nfunc TestExec(t *testing.T) {\n- for name, conf := range configs(t, overlay) {\n+ for name, conf := range configsWithVFS2(t, overlay) {\nt.Run(name, func(t *testing.T) {\nconst uid = 343\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n@@ -695,7 +700,7 @@ func TestExec(t *testing.T) {\n// TestKillPid verifies that we can signal individual exec'd processes.\nfunc TestKillPid(t *testing.T) {\n- for name, conf := range configs(t, overlay) {\n+ for name, conf := range configsWithVFS2(t, overlay) {\nt.Run(name, func(t *testing.T) {\napp, err := testutil.FindFile(\"test/cmd/test_app/test_app\")\nif err != nil {\n@@ -1211,7 +1216,7 @@ func TestCapabilities(t *testing.T) {\nuid := auth.KUID(os.Getuid() + 1)\ngid := auth.KGID(os.Getgid() + 1)\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configsWithVFS2(t, all...) {\nt.Run(name, func(t *testing.T) {\nspec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\nrootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n@@ -1409,7 +1414,7 @@ func TestReadonlyRoot(t *testing.T) {\n}\nfunc TestUIDMap(t *testing.T) {\n- for name, conf := range configs(t, noOverlay...) {\n+ for name, conf := range configsWithVFS2(t, noOverlay...) 
{\nt.Run(name, func(t *testing.T) {\ntestDir, err := ioutil.TempDir(testutil.TmpDir(), \"test-mount\")\nif err != nil {\n@@ -1886,7 +1891,7 @@ func doDestroyStartingTest(t *testing.T, vfs2 bool) {\n}\nfunc TestCreateWorkingDir(t *testing.T) {\n- for name, conf := range configs(t, overlay) {\n+ for name, conf := range configsWithVFS2(t, overlay) {\nt.Run(name, func(t *testing.T) {\ntmpDir, err := ioutil.TempDir(testutil.TmpDir(), \"cwd-create\")\nif err != nil {\n@@ -2009,7 +2014,7 @@ func TestMountPropagation(t *testing.T) {\n}\nfunc TestMountSymlink(t *testing.T) {\n- for name, conf := range configs(t, overlay) {\n+ for name, conf := range configsWithVFS2(t, overlay) {\nt.Run(name, func(t *testing.T) {\ndir, err := ioutil.TempDir(testutil.TmpDir(), \"mount-symlink\")\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -129,7 +129,7 @@ func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) {\n// TestMultiContainerSanity checks that it is possible to run 2 dead-simple\n// containers in the same sandbox.\nfunc TestMultiContainerSanity(t *testing.T) {\n- for name, conf := range configs(t, all...) {\n+ for name, conf := range configsWithVFS2(t, all...) {\nt.Run(name, func(t *testing.T) {\nrootDir, cleanup, err := testutil.SetupRootDir()\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/namespace.go", "new_path": "runsc/specutils/namespace.go", "diff": "@@ -18,6 +18,7 @@ import (\n\"fmt\"\n\"os\"\n\"os/exec\"\n+ \"os/signal\"\n\"path/filepath\"\n\"runtime\"\n\"syscall\"\n@@ -261,7 +262,18 @@ func MaybeRunAsRoot() error {\ncmd.Stdin = os.Stdin\ncmd.Stdout = os.Stdout\ncmd.Stderr = os.Stderr\n- if err := cmd.Run(); err != nil {\n+ if err := cmd.Start(); err != nil {\n+ return fmt.Errorf(\"re-executing self: %w\", err)\n+ }\n+ ch := make(chan os.Signal, 1)\n+ signal.Notify(ch)\n+ go func() {\n+ for {\n+ // Forward all signals to child process.\n+ cmd.Process.Signal(<-ch)\n+ }\n+ }()\n+ if err := cmd.Wait(); err != nil {\nif exit, ok := err.(*exec.ExitError); ok {\nif ws, ok := exit.Sys().(syscall.WaitStatus); ok {\nos.Exit(ws.ExitStatus())\n@@ -269,7 +281,7 @@ func MaybeRunAsRoot() error {\nlog.Warningf(\"No wait status provided, exiting with -1: %v\", err)\nos.Exit(-1)\n}\n- return fmt.Errorf(\"re-executing self: %v\", err)\n+ return err\n}\n// Child completed with success.\nos.Exit(0)\n" } ]
Go
Apache License 2.0
google/gvisor
More runsc changes for VFS2 - Add /tmp handling - Apply mount options - Enable more container_test tests - Forward signals to child process when test respawns process to run as root inside namespace. Updates #1487 PiperOrigin-RevId: 314263281
259,992
01.06.2020 23:01:35
25,200
4b5eae39f201ffbe7f4a0e08a28380099469efe8
Enable VFS2 for runsc syscall tests. Updates
[ { "change_type": "MODIFY", "old_path": "test/runner/defs.bzl", "new_path": "test/runner/defs.bzl", "diff": "@@ -60,7 +60,8 @@ def _syscall_test(\nnetwork = \"none\",\nfile_access = \"exclusive\",\noverlay = False,\n- add_uds_tree = False):\n+ add_uds_tree = False,\n+ vfs2 = False):\n# Prepend \"runsc\" to non-native platform names.\nfull_platform = platform if platform == \"native\" else \"runsc_\" + platform\n@@ -70,6 +71,8 @@ def _syscall_test(\nname += \"_shared\"\nif overlay:\nname += \"_overlay\"\n+ if vfs2:\n+ name += \"_vfs2\"\nif network != \"none\":\nname += \"_\" + network + \"net\"\n@@ -102,6 +105,7 @@ def _syscall_test(\n\"--file-access=\" + file_access,\n\"--overlay=\" + str(overlay),\n\"--add-uds-tree=\" + str(add_uds_tree),\n+ \"--vfs2=\" + str(vfs2),\n]\n# Call the rule above.\n@@ -123,6 +127,7 @@ def syscall_test(\nadd_overlay = False,\nadd_uds_tree = False,\nadd_hostinet = False,\n+ vfs2 = False,\ntags = None):\n\"\"\"syscall_test is a macro that will create targets for all platforms.\n@@ -160,6 +165,29 @@ def syscall_test(\ntags = platform_tags + tags,\n)\n+ vfs2_tags = list(tags)\n+ if vfs2:\n+ # Add tag to easily run VFS2 tests with --test_tag_filters=vfs2\n+ vfs2_tags.append(\"vfs2\")\n+\n+ else:\n+ # Don't automatically run tests tests not yet passing.\n+ vfs2_tags.append(\"manual\")\n+ vfs2_tags.append(\"noguitar\")\n+ vfs2_tags.append(\"notap\")\n+\n+ _syscall_test(\n+ test = test,\n+ shard_count = shard_count,\n+ size = size,\n+ platform = default_platform,\n+ use_tmpfs = use_tmpfs,\n+ add_uds_tree = add_uds_tree,\n+ tags = platforms[default_platform] + vfs2_tags,\n+ vfs2 = True,\n+ )\n+\n+ # TODO(gvisor.dev/issue/1487): Enable VFS2 overlay tests.\nif add_overlay:\n_syscall_test(\ntest = test,\n@@ -172,6 +200,18 @@ def syscall_test(\noverlay = True,\n)\n+ if add_hostinet:\n+ _syscall_test(\n+ test = test,\n+ shard_count = shard_count,\n+ size = size,\n+ platform = default_platform,\n+ use_tmpfs = use_tmpfs,\n+ network = \"host\",\n+ add_uds_tree = add_uds_tree,\n+ tags = platforms[default_platform] + tags,\n+ )\n+\nif not use_tmpfs:\n# Also test shared gofer access.\n_syscall_test(\n@@ -184,15 +224,14 @@ def syscall_test(\ntags = platforms[default_platform] + tags,\nfile_access = \"shared\",\n)\n-\n- if add_hostinet:\n_syscall_test(\ntest = test,\nshard_count = shard_count,\nsize = size,\nplatform = default_platform,\nuse_tmpfs = use_tmpfs,\n- network = \"host\",\nadd_uds_tree = add_uds_tree,\n- tags = platforms[default_platform] + tags,\n+ tags = platforms[default_platform] + vfs2_tags,\n+ file_access = \"shared\",\n+ vfs2 = True,\n)\n" }, { "change_type": "MODIFY", "old_path": "test/runner/runner.go", "new_path": "test/runner/runner.go", "diff": "@@ -46,6 +46,7 @@ var (\nuseTmpfs = flag.Bool(\"use-tmpfs\", false, \"mounts tmpfs for /tmp\")\nfileAccess = flag.String(\"file-access\", \"exclusive\", \"mounts root in exclusive or shared mode\")\noverlay = flag.Bool(\"overlay\", false, \"wrap filesystem mounts with writable tmpfs overlay\")\n+ vfs2 = flag.Bool(\"vfs2\", false, \"enable VFS2\")\nparallel = flag.Bool(\"parallel\", false, \"run tests in parallel\")\nrunscPath = flag.String(\"runsc\", \"\", \"path to runsc binary\")\n@@ -146,6 +147,9 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\nif *overlay {\nargs = append(args, \"-overlay\")\n}\n+ if *vfs2 {\n+ args = append(args, \"-vfs2\")\n+ }\nif *debug {\nargs = append(args, \"-debug\", \"-log-packets=true\")\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": 
"test/syscalls/BUILD", "diff": "@@ -2,22 +2,33 @@ load(\"//test/runner:defs.bzl\", \"syscall_test\")\npackage(licenses = [\"notice\"])\n-syscall_test(test = \"//test/syscalls/linux:32bit_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:32bit_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:accept_bind_stream_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:accept_bind_stream_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:accept_bind_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:access_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:affinity_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:affinity_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\n@@ -28,11 +39,18 @@ syscall_test(\nsize = \"medium\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:alarm_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:arch_prctl_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:arch_prctl_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:bad_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:bad_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"large\",\n@@ -40,9 +58,15 @@ syscall_test(\ntest = \"//test/syscalls/linux:bind_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:brk_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:brk_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"large\",\n@@ -51,16 +75,19 @@ syscall_test(\n# involve much concurrency, TSAN's usefulness here is limited anyway.\ntags = [\"nogotsan\"],\ntest = \"//test/syscalls/linux:socket_stress_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:chdir_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:chmod_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -68,6 +95,7 @@ syscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:chown_test\",\nuse_tmpfs = True, # chwon tests require gofer to be running as root.\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -75,45 +103,70 @@ syscall_test(\ntest = \"//test/syscalls/linux:chroot_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:clock_getres_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:clock_getres_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:clock_gettime_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:clock_nanosleep_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:clock_nanosleep_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:concurrency_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:concurrency_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_uds_tree = True,\ntest = \"//test/syscalls/linux:connect_external_test\",\nuse_tmpfs = True,\n+ vfs2 = \"True\",\n)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:creat_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:dev_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:dev_test\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:dup_test\",\n+ vfs2 = 
\"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:epoll_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:epoll_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:eventfd_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:eventfd_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:exceptions_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:exceptions_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:exec_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -122,7 +175,10 @@ syscall_test(\ntest = \"//test/syscalls/linux:exec_binary_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:exit_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:exit_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\n@@ -134,11 +190,15 @@ syscall_test(\ntest = \"//test/syscalls/linux:fallocate_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:fault_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:fault_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:fchdir_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -152,11 +212,20 @@ syscall_test(\ntest = \"//test/syscalls/linux:flock_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:fork_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:fork_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:fpsig_fork_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:fpsig_fork_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:fpsig_nested_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:fpsig_nested_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\n@@ -167,20 +236,33 @@ syscall_test(\nsize = \"medium\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:futex_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:getcpu_host_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:getcpu_host_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:getcpu_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:getcpu_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:getdents_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:getrandom_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:getrandom_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:getrusage_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:getrusage_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\n@@ -196,15 +278,20 @@ syscall_test(\nsyscall_test(\ntest = \"//test/syscalls/linux:iptables_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"large\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:itimer_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:kill_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:kill_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\n@@ -215,19 +302,33 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:lseek_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:madvise_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:madvise_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:memory_accounting_test\")\n+syscall_test(\n+ test = 
\"//test/syscalls/linux:memory_accounting_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:mempolicy_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:mempolicy_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:mincore_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:mincore_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:mkdir_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -249,20 +350,29 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:mremap_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:msync_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:munmap_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:munmap_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:network_namespace_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:network_namespace_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:open_create_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -270,40 +380,65 @@ syscall_test(\ntest = \"//test/syscalls/linux:open_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:packet_socket_raw_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:packet_socket_raw_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:packet_socket_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:packet_socket_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:partial_bad_buffer_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:partial_bad_buffer_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:pause_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:pause_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"large\",\nadd_overlay = True,\nshard_count = 5,\ntest = \"//test/syscalls/linux:pipe_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:poll_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:poll_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:ppoll_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:prctl_setuid_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:prctl_setuid_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:prctl_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:prctl_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:pread64_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:preadv_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -311,36 +446,56 @@ syscall_test(\ntest = \"//test/syscalls/linux:preadv2_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:priority_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:priority_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:proc_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:proc_net_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:proc_net_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:proc_pid_oomscore_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:proc_pid_oomscore_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = 
\"//test/syscalls/linux:proc_pid_smaps_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:proc_pid_smaps_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:proc_pid_uid_gid_map_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:proc_pid_uid_gid_map_test\",\n+)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:pselect_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:ptrace_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:ptrace_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:pty_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\ntest = \"//test/syscalls/linux:pty_root_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -351,17 +506,28 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:pwrite64_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:raw_socket_hdrincl_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:raw_socket_hdrincl_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:raw_socket_icmp_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:raw_socket_icmp_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:raw_socket_ipv4_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:raw_socket_ipv4_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:read_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -373,12 +539,14 @@ syscall_test(\nsize = \"medium\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:readv_socket_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:readv_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -387,25 +555,50 @@ syscall_test(\ntest = \"//test/syscalls/linux:rename_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:rlimits_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:rlimits_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:rseq_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:rseq_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:rtsignal_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:rtsignal_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:signalfd_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:signalfd_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sched_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sched_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sched_yield_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sched_yield_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:seccomp_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:seccomp_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:select_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:select_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nshard_count = 20,\ntest = \"//test/syscalls/linux:semaphore_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -421,49 +614,68 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:splice_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:sigaction_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sigaction_test\",\n+ vfs2 = 
\"True\",\n+)\n# TODO(b/119826902): Enable once the test passes in runsc.\n-# syscall_test(test = \"//test/syscalls/linux:sigaltstack_test\")\n+# syscall_test(vfs2=\"True\",test = \"//test/syscalls/linux:sigaltstack_test\")\n-syscall_test(test = \"//test/syscalls/linux:sigiret_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sigiret_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sigprocmask_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sigprocmask_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:sigstop_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:sigtimedwait_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sigtimedwait_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:shm_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_abstract_non_blocking_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_abstract_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_domain_non_blocking_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_domain_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -489,58 +701,90 @@ syscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_ip_tcp_generic_loopback_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_ip_tcp_loopback_non_blocking_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_ip_tcp_loopback_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_ip_tcp_udp_generic_loopback_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_ip_udp_loopback_non_blocking_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_ip_udp_loopback_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_ipv4_udp_unbound_loopback_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:socket_ip_unbound_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_ip_unbound_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_netdevice_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_netdevice_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_netlink_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_netlink_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_netlink_route_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_netlink_route_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_netlink_uevent_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_netlink_uevent_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_blocking_local_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_blocking_local_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_blocking_ip_test\")\n+syscall_test(\n+ 
test = \"//test/syscalls/linux:socket_blocking_ip_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_non_stream_blocking_local_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_non_stream_blocking_local_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:socket_non_stream_blocking_udp_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_non_stream_blocking_udp_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"large\",\n@@ -550,6 +794,7 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\ntest = \"//test/syscalls/linux:socket_stream_blocking_tcp_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -572,6 +817,7 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_unix_dgram_non_blocking_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -579,6 +825,7 @@ syscall_test(\nadd_overlay = True,\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_unix_pair_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -596,11 +843,13 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_unix_unbound_abstract_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_unix_unbound_dgram_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -612,6 +861,7 @@ syscall_test(\nsize = \"medium\",\nshard_count = 10,\ntest = \"//test/syscalls/linux:socket_unix_unbound_seqpacket_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -623,6 +873,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:statfs_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -633,6 +884,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:stat_times_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -648,6 +900,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:sync_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -655,86 +908,151 @@ syscall_test(\ntest = \"//test/syscalls/linux:sync_file_range_test\",\n)\n-syscall_test(test = \"//test/syscalls/linux:sysinfo_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sysinfo_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:syslog_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:syslog_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:sysret_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:sysret_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\nshard_count = 10,\ntest = \"//test/syscalls/linux:tcp_socket_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:tgkill_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:tgkill_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:timerfd_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:timerfd_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:timers_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:timers_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:time_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:time_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:tkill_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:tkill_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:truncate_test\",\n)\n-syscall_test(test = 
\"//test/syscalls/linux:tuntap_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:tuntap_test\",\n+)\nsyscall_test(\nadd_hostinet = True,\ntest = \"//test/syscalls/linux:tuntap_hostinet_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:udp_bind_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:udp_bind_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\nadd_hostinet = True,\nshard_count = 10,\ntest = \"//test/syscalls/linux:udp_socket_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:uidgid_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:uidgid_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:uname_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:uname_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:unlink_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:unshare_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:unshare_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:utimes_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:utimes_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:vdso_clock_gettime_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:vdso_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:vdso_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:vsyscall_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:vsyscall_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:vfork_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:vfork_test\",\n+ vfs2 = \"True\",\n+)\nsyscall_test(\nsize = \"medium\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:wait_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:write_test\",\n+ vfs2 = \"True\",\n)\n-syscall_test(test = \"//test/syscalls/linux:proc_net_unix_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:proc_net_unix_test\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:proc_net_tcp_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:proc_net_tcp_test\",\n+ vfs2 = \"True\",\n+)\n-syscall_test(test = \"//test/syscalls/linux:proc_net_udp_test\")\n+syscall_test(\n+ test = \"//test/syscalls/linux:proc_net_udp_test\",\n+ vfs2 = \"True\",\n+)\n" } ]
Go
Apache License 2.0
google/gvisor
Enable VFS2 to runsc syscall tests Updates #1487 PiperOrigin-RevId: 314271995
260,022
02.06.2020 22:08:23
0
5173c960212a2d78d69e94f028629cbcac8c04f9
Add some detail to milestone This change adds more information about what needs to be done to implement `/dev/fuse`
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/g3doc/fuse.md", "new_path": "pkg/sentry/fs/g3doc/fuse.md", "diff": "@@ -76,7 +76,8 @@ ops can be implemented in parallel.\n#### Minimal client that can mount a trivial FUSE filesystem.\n-- Implement `/dev/fuse`.\n+- Implement `/dev/fuse` - a character device used to establish an FD for\n+ communication between the sentry and the server daemon.\n- Implement basic FUSE ops like `FUSE_INIT`, `FUSE_DESTROY`.\n" } ]
Go
Apache License 2.0
google/gvisor
Add some detail to milestone #1 This change adds more information about what needs to be done to implement `/dev/fuse`
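For context on what "establish an FD for communication" means in practice, here is a hedged sketch of how an ordinary FUSE daemon wires itself up on Linux; the mountpoint and source name are assumptions, and this is not gVisor code.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Open the character device; the returned FD carries all FUSE traffic.
	dev, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer dev.Close()

	// Assumed mountpoint. fd=, rootmode=, user_id= and group_id= are the
	// options the kernel requires in the mount data string.
	data := fmt.Sprintf("fd=%d,rootmode=40000,user_id=%d,group_id=%d",
		dev.Fd(), os.Getuid(), os.Getgid())
	if err := unix.Mount("demo", "/mnt/fuse-demo", "fuse", 0, data); err != nil {
		log.Fatal(err)
	}

	// From here the daemon loops: read a request (the first is FUSE_INIT)
	// from dev, handle it, and write the reply back to dev.
}
```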
259,853
02.06.2020 19:17:53
25,200
e6334e81ca8d951e56f03d8ea0629e3c85556cf1
Check that two sockets with different types can't be connected to each other
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/transport/connectioned.go", "new_path": "pkg/sentry/socket/unix/transport/connectioned.go", "diff": "@@ -252,7 +252,7 @@ func (e *connectionedEndpoint) Close() {\n// BidirectionalConnect implements BoundEndpoint.BidirectionalConnect.\nfunc (e *connectionedEndpoint) BidirectionalConnect(ctx context.Context, ce ConnectingEndpoint, returnConnect func(Receiver, ConnectedEndpoint)) *syserr.Error {\nif ce.Type() != e.stype {\n- return syserr.ErrConnectionRefused\n+ return syserr.ErrWrongProtocolForSocket\n}\n// Check if ce is e to avoid a deadlock.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -417,7 +417,18 @@ func (s *socketOpsCommon) Connect(t *kernel.Task, sockaddr []byte, blocking bool\ndefer ep.Release()\n// Connect the server endpoint.\n- return s.ep.Connect(t, ep)\n+ err = s.ep.Connect(t, ep)\n+\n+ if err == syserr.ErrWrongProtocolForSocket {\n+ // Linux for abstract sockets returns ErrConnectionRefused\n+ // instead of ErrWrongProtocolForSocket.\n+ path, _ := extractPath(sockaddr)\n+ if len(path) > 0 && path[0] == 0 {\n+ err = syserr.ErrConnectionRefused\n+ }\n+ }\n+\n+ return err\n}\n// Write implements fs.FileOperations.Write.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/accept_bind.cc", "new_path": "test/syscalls/linux/accept_bind.cc", "diff": "// limitations under the License.\n#include <stdio.h>\n+#include <sys/socket.h>\n#include <sys/un.h>\n#include <algorithm>\n@@ -141,6 +142,47 @@ TEST_P(AllSocketPairTest, Connect) {\nSyscallSucceeds());\n}\n+TEST_P(AllSocketPairTest, ConnectWithWrongType) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int type;\n+ socklen_t typelen = sizeof(type);\n+ EXPECT_THAT(\n+ getsockopt(sockets->first_fd(), SOL_SOCKET, SO_TYPE, &type, &typelen),\n+ SyscallSucceeds());\n+ switch (type) {\n+ case SOCK_STREAM:\n+ type = SOCK_SEQPACKET;\n+ break;\n+ case SOCK_SEQPACKET:\n+ type = SOCK_STREAM;\n+ break;\n+ }\n+\n+ const FileDescriptor another_socket =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, type, 0));\n+\n+ ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallSucceeds());\n+\n+ ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds());\n+\n+ if (sockets->first_addr()->sa_data[0] != 0) {\n+ ASSERT_THAT(connect(another_socket.get(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallFailsWithErrno(EPROTOTYPE));\n+ } else {\n+ ASSERT_THAT(connect(another_socket.get(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallFailsWithErrno(ECONNREFUSED));\n+ }\n+\n+ ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(),\n+ sockets->first_addr_size()),\n+ SyscallSucceeds());\n+}\n+\nTEST_P(AllSocketPairTest, ConnectNonListening) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n" } ]
Go
Apache License 2.0
google/gvisor
Check that two sockets with different types can't be connected to each other PiperOrigin-RevId: 314450191
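To see the behavior the new test asserts, a quick userspace check (illustrative only; the socket path is made up) connects a SOCK_SEQPACKET client to a SOCK_STREAM listener bound to a filesystem path and expects EPROTOTYPE; with an abstract address the kernel reports ECONNREFUSED instead.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Assumed filesystem socket path (not an abstract address).
	const path = "/tmp/type-mismatch.sock"
	unix.Unlink(path)

	srv, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(srv)
	addr := &unix.SockaddrUnix{Name: path}
	if err := unix.Bind(srv, addr); err != nil {
		log.Fatal(err)
	}
	if err := unix.Listen(srv, 1); err != nil {
		log.Fatal(err)
	}

	// A client of a different type must be rejected: EPROTOTYPE here, or
	// ECONNREFUSED if the listener had used an abstract address instead.
	cli, err := unix.Socket(unix.AF_UNIX, unix.SOCK_SEQPACKET, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(cli)
	err = unix.Connect(cli, addr)
	fmt.Printf("connect: %v (want %v)\n", err, unix.EPROTOTYPE)
}
```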
260,023
03.06.2020 08:48:23
25,200
162848e129e50e25c3bb9c5fdc337584b3531da0
Avoid TCP segment split when out of sender window. If the entire segment cannot be accommodated in the receiver advertised window and if there are still unacknowledged pending segments, skip splitting the segment. The segment transmit would get retried by the retransmit handler.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/snd.go", "new_path": "pkg/tcpip/transport/tcp/snd.go", "diff": "@@ -816,6 +816,25 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se\npanic(\"Netstack queues FIN segments without data.\")\n}\n+ segEnd = seg.sequenceNumber.Add(seqnum.Size(seg.data.Size()))\n+ // If the entire segment cannot be accomodated in the receiver\n+ // advertized window, skip splitting and sending of the segment.\n+ // ref: net/ipv4/tcp_output.c::tcp_snd_wnd_test()\n+ //\n+ // Linux checks this for all segment transmits not triggered\n+ // by a probe timer. On this condition, it defers the segment\n+ // split and transmit to a short probe timer.\n+ // ref: include/net/tcp.h::tcp_check_probe_timer()\n+ // ref: net/ipv4/tcp_output.c::tcp_write_wakeup()\n+ //\n+ // Instead of defining a new transmit timer, we attempt to split the\n+ // segment right here if there are no pending segments.\n+ // If there are pending segments, segment transmits are deferred\n+ // to the retransmit timer handler.\n+ if s.sndUna != s.sndNxt && !segEnd.LessThan(end) {\n+ return false\n+ }\n+\nif !seg.sequenceNumber.LessThan(end) {\nreturn false\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/BUILD", "new_path": "test/packetimpact/tests/BUILD", "diff": "@@ -124,10 +124,8 @@ packetimpact_go_test(\n)\npacketimpact_go_test(\n- name = \"tcp_should_piggyback\",\n- srcs = [\"tcp_should_piggyback_test.go\"],\n- # TODO(b/153680566): Fix netstack then remove the line below.\n- expect_netstack_failure = True,\n+ name = \"tcp_send_window_sizes_piggyback\",\n+ srcs = [\"tcp_send_window_sizes_piggyback_test.go\"],\ndeps = [\n\"//pkg/tcpip/header\",\n\"//test/packetimpact/testbench\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp_send_window_sizes_piggyback_test\n+\n+import (\n+ \"flag\"\n+ \"fmt\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ tb \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ tb.RegisterFlags(flag.CommandLine)\n+}\n+\n+// TestSendWindowSizesPiggyback tests cases where segment sizes are close to\n+// sender window size and checks for ACK piggybacking for each of those case.\n+func TestSendWindowSizesPiggyback(t *testing.T) {\n+ sampleData := []byte(\"Sample Data\")\n+ segmentSize := uint16(len(sampleData))\n+ // Advertise receive window sizes that are lesser, equal to or greater than\n+ // enqueued segment size and check for segment transmits. 
The test attempts\n+ // to enqueue a segment on the dut before acknowledging previous segment and\n+ // lets the dut piggyback any ACKs along with the enqueued segment.\n+ for _, tt := range []struct {\n+ description string\n+ windowSize uint16\n+ expectedPayload1 []byte\n+ expectedPayload2 []byte\n+ enqueue bool\n+ }{\n+ // Expect the first segment to be split as it cannot be accomodated in\n+ // the sender window. This means we need not enqueue a new segment after\n+ // the first segment.\n+ {\"WindowSmallerThanSegment\", segmentSize - 1, sampleData[:(segmentSize - 1)], sampleData[(segmentSize - 1):], false /* enqueue */},\n+\n+ {\"WindowEqualToSegment\", segmentSize, sampleData, sampleData, true /* enqueue */},\n+\n+ // Expect the second segment to not be split as its size is greater than\n+ // the available sender window size. The segments should not be split\n+ // when there is pending unacknowledged data and the segment-size is\n+ // greater than available sender window.\n+ {\"WindowGreaterThanSegment\", segmentSize + 1, sampleData, sampleData, true /* enqueue */},\n+ } {\n+ t.Run(fmt.Sprintf(\"%s%d\", tt.description, tt.windowSize), func(t *testing.T) {\n+ dut := tb.NewDUT(t)\n+ defer dut.TearDown()\n+ listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n+ defer dut.Close(listenFd)\n+\n+ conn := tb.NewTCPIPv4(t, tb.TCP{DstPort: &remotePort, WindowSize: tb.Uint16(tt.windowSize)}, tb.TCP{SrcPort: &remotePort})\n+ defer conn.Close()\n+\n+ conn.Handshake()\n+ acceptFd, _ := dut.Accept(listenFd)\n+ defer dut.Close(acceptFd)\n+\n+ dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)\n+\n+ expectedTCP := tb.TCP{Flags: tb.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}\n+\n+ dut.Send(acceptFd, sampleData, 0)\n+ expectedPayload := tb.Payload{Bytes: tt.expectedPayload1}\n+ if _, err := conn.ExpectData(&expectedTCP, &expectedPayload, time.Second); err != nil {\n+ t.Fatalf(\"Expected %s but didn't get one: %s\", tb.Layers{&expectedTCP, &expectedPayload}, err)\n+ }\n+\n+ // Expect any enqueued segment to be transmitted by the dut along with\n+ // piggybacked ACK for our data.\n+\n+ if tt.enqueue {\n+ // Enqueue a segment for the dut to transmit.\n+ dut.Send(acceptFd, sampleData, 0)\n+ }\n+\n+ // Send ACK for the previous segment along with data for the dut to\n+ // receive and ACK back. 
Sending this ACK would make room for the dut\n+ // to transmit any enqueued segment.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: tb.Uint16(tt.windowSize)}, &tb.Payload{Bytes: sampleData})\n+\n+ // Expect the dut to piggyback the ACK for received data along with\n+ // the segment enqueued for transmit.\n+ expectedPayload = tb.Payload{Bytes: tt.expectedPayload2}\n+ if _, err := conn.ExpectData(&expectedTCP, &expectedPayload, time.Second); err != nil {\n+ t.Fatalf(\"Expected %s but didn't get one: %s\", tb.Layers{&expectedTCP, &expectedPayload}, err)\n+ }\n+ })\n+ }\n+}\n" }, { "change_type": "DELETE", "old_path": "test/packetimpact/tests/tcp_should_piggyback_test.go", "new_path": null, "diff": "-// Copyright 2020 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package tcp_should_piggyback_test\n-\n-import (\n- \"flag\"\n- \"testing\"\n- \"time\"\n-\n- \"golang.org/x/sys/unix\"\n- \"gvisor.dev/gvisor/pkg/tcpip/header\"\n- tb \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n-)\n-\n-func init() {\n- tb.RegisterFlags(flag.CommandLine)\n-}\n-\n-func TestPiggyback(t *testing.T) {\n- dut := tb.NewDUT(t)\n- defer dut.TearDown()\n- listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n- defer dut.Close(listenFd)\n- conn := tb.NewTCPIPv4(t, tb.TCP{DstPort: &remotePort, WindowSize: tb.Uint16(12)}, tb.TCP{SrcPort: &remotePort})\n- defer conn.Close()\n-\n- conn.Handshake()\n- acceptFd, _ := dut.Accept(listenFd)\n- defer dut.Close(acceptFd)\n-\n- dut.SetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)\n-\n- sampleData := []byte(\"Sample Data\")\n-\n- dut.Send(acceptFd, sampleData, 0)\n- expectedTCP := tb.TCP{Flags: tb.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}\n- expectedPayload := tb.Payload{Bytes: sampleData}\n- if _, err := conn.ExpectData(&expectedTCP, &expectedPayload, time.Second); err != nil {\n- t.Fatalf(\"Expected %v but didn't get one: %s\", tb.Layers{&expectedTCP, &expectedPayload}, err)\n- }\n-\n- // Cause DUT to send us more data as soon as we ACK their first data segment because we have\n- // a small window.\n- dut.Send(acceptFd, sampleData, 0)\n-\n- // DUT should ACK our segment by piggybacking ACK to their outstanding data segment instead of\n- // sending a separate ACK packet.\n- conn.Send(expectedTCP, &expectedPayload)\n- if _, err := conn.ExpectData(&expectedTCP, &expectedPayload, time.Second); err != nil {\n- t.Fatalf(\"Expected %v but didn't get one: %s\", tb.Layers{&expectedTCP, &expectedPayload}, err)\n- }\n-}\n" } ]
Go
Apache License 2.0
google/gvisor
Avoid TCP segment split when out of sender window. If the entire segment cannot be accommodated in the receiver advertised window and if there are still unacknowledged pending segments, skip splitting the segment. The segment transmit would get retried by the retransmit handler. PiperOrigin-RevId: 314538523
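The skip-split check described in this commit can be restated compactly. The sketch below is illustrative only: it uses plain uint32 sequence numbers and ignores wraparound, whereas the real netstack code compares seqnum.Value with LessThan.

```go
package main

import "fmt"

// shouldSkipSplit mirrors the condition added to maybeSendSegment: if the
// entire segment cannot fit in the advertised window (segEnd is not below
// the window limit end) and unacknowledged data is still outstanding
// (sndUna != sndNxt), the segment is neither split nor sent now; the
// retransmit timer retries it later.
func shouldSkipSplit(sndUna, sndNxt, segEnd, end uint32) bool {
	return sndUna != sndNxt && segEnd >= end
}

func main() {
	// 100 bytes are unacknowledged and the segment overruns the window
	// limit, so the transmit is deferred rather than split.
	fmt.Println(shouldSkipSplit(1000, 1100, 1150, 1100)) // true
}
```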
259,885
03.06.2020 11:01:34
25,200
c8e79683891db9e780449112f78ee4004a2de833
Take Mount reference in VFS.connectLocked. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/README.md", "new_path": "pkg/sentry/vfs/README.md", "diff": "@@ -39,8 +39,8 @@ Mount references are held by:\n- Mount: Each referenced Mount holds a reference on its parent, which is the\nmount containing its mount point.\n-- VirtualFilesystem: A reference is held on each Mount that has not been\n- umounted.\n+- VirtualFilesystem: A reference is held on each Mount that has been connected\n+ to a mount point, but not yet umounted.\nMountNamespace and FileDescription references are held by users of VFS. The\nexpectation is that each `kernel.Task` holds a reference on its corresponding\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/mount.go", "new_path": "pkg/sentry/vfs/mount.go", "diff": "@@ -265,8 +265,8 @@ func (vfs *VirtualFilesystem) MountAt(ctx context.Context, creds *auth.Credentia\nif err != nil {\nreturn err\n}\n+ defer mnt.DecRef()\nif err := vfs.ConnectMountAt(ctx, creds, mnt, target); err != nil {\n- mnt.DecRef()\nreturn err\n}\nreturn nil\n@@ -394,8 +394,15 @@ func (vfs *VirtualFilesystem) umountRecursiveLocked(mnt *Mount, opts *umountRecu\n// references held by vd.\n//\n// Preconditions: vfs.mountMu must be locked. vfs.mounts.seq must be in a\n-// writer critical section. d.mu must be locked. mnt.parent() == nil.\n+// writer critical section. d.mu must be locked. mnt.parent() == nil, i.e. mnt\n+// must not already be connected.\nfunc (vfs *VirtualFilesystem) connectLocked(mnt *Mount, vd VirtualDentry, mntns *MountNamespace) {\n+ if checkInvariants {\n+ if mnt.parent() != nil {\n+ panic(\"VFS.connectLocked called on connected mount\")\n+ }\n+ }\n+ mnt.IncRef() // dropped by callers of umountRecursiveLocked\nmnt.storeKey(vd)\nif vd.mount.children == nil {\nvd.mount.children = make(map[*Mount]struct{})\n@@ -420,6 +427,11 @@ func (vfs *VirtualFilesystem) connectLocked(mnt *Mount, vd VirtualDentry, mntns\n// writer critical section. mnt.parent() != nil.\nfunc (vfs *VirtualFilesystem) disconnectLocked(mnt *Mount) VirtualDentry {\nvd := mnt.loadKey()\n+ if checkInvariants {\n+ if vd.mount != nil {\n+ panic(\"VFS.disconnectLocked called on disconnected mount\")\n+ }\n+ }\nmnt.storeKey(VirtualDentry{})\ndelete(vd.mount.children, mnt)\natomic.AddUint32(&vd.dentry.mounts, math.MaxUint32) // -1\n" } ]
Go
Apache License 2.0
google/gvisor
Take Mount reference in VFS.connectLocked. Updates #179 PiperOrigin-RevId: 314563830
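The reference-ownership pattern this change adopts — the creator's reference is always dropped, while connectLocked takes the reference that the unmount path later releases — can be sketched with a toy refcount. The mount type below is a hypothetical stand-in, not the real vfs.Mount.

```go
package main

import "fmt"

type mount struct{ refs int }

func (m *mount) incRef() { m.refs++ }
func (m *mount) decRef() { m.refs-- }

// connect models VFS.connectLocked after the change: it takes its own
// reference on the mount, which the unmount path is responsible for
// dropping.
func connect(m *mount) { m.incRef() }

// mountAt models VFS.MountAt: it owns the initial reference from creation
// and unconditionally drops it once the mount is (or fails to be) connected.
func mountAt() *mount {
	m := &mount{refs: 1} // reference from creating the mount
	defer m.decRef()     // caller's reference is always released
	connect(m)           // connected mount keeps exactly one reference
	return m
}

func main() {
	fmt.Println(mountAt().refs) // 1: held until umount
}
```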
259,858
03.06.2020 11:30:49
25,200
fa9c9055053172ba8653c9543e4195e0207c7543
Fix test release commands to work with older GPG. The --quick-generate-key command wasn't added until recently.
[ { "change_type": "MODIFY", "old_path": "Makefile", "new_path": "Makefile", "diff": "@@ -171,12 +171,20 @@ RELEASE_COMMIT :=\nRELEASE_NAME :=\nRELEASE_NOTES :=\n+GPG_TEST_OPTIONS := $(shell if gpg --pinentry-mode loopback --version >/dev/null 2>&1; then echo --pinentry-mode loopback; fi)\n$(RELEASE_KEY):\n@echo \"WARNING: Generating a key for testing ($@); don't use this.\"\nT=$$(mktemp /tmp/keyring.XXXXXX); \\\n- gpg --no-default-keyring --keyring $$T --batch --passphrase \"\" --quick-generate-key $(shell whoami) && \\\n- gpg --export-secret-keys --no-default-keyring --keyring $$T > $@; \\\n- rc=$$?; rm -f $$T; exit $$rc\n+ C=$$(mktemp /tmp/config.XXXXXX); \\\n+ echo Key-Type: DSA >> $$C && \\\n+ echo Key-Length: 1024 >> $$C && \\\n+ echo Name-Real: Test >> $$C && \\\n+ echo Name-Email: [email protected] >> $$C && \\\n+ echo Expire-Date: 0 >> $$C && \\\n+ echo %commit >> $$C && \\\n+ gpg --batch $(GPG_TEST_OPTIONS) --passphrase '' --no-default-keyring --keyring $$T --no-tty --gen-key $$C && \\\n+ gpg --batch $(GPG_TEST_OPTIONS) --export-secret-keys --no-default-keyring --keyring $$T --secret-keyring $$T > $@; \\\n+ rc=$$?; rm -f $$T $$C; exit $$rc\nrelease: $(RELEASE_KEY) ## Builds a release.\n@mkdir -p $(RELEASE_ROOT)\n" } ]
Go
Apache License 2.0
google/gvisor
Fix test release commands to work with older GPG. The --quick-generate-key command wasn't added until recently. PiperOrigin-RevId: 314570297
259,881
03.06.2020 11:33:39
25,200
b2e2a081a8a180764677111ae3c0b6179be81d31
Add metric for startup watchdog timeout
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/watchdog/watchdog.go", "new_path": "pkg/sentry/watchdog/watchdog.go", "diff": "@@ -77,7 +77,10 @@ var DefaultOpts = Opts{\n// trigger it.\nconst descheduleThreshold = 1 * time.Second\n-var stuckTasks = metric.MustCreateNewUint64Metric(\"/watchdog/stuck_tasks_detected\", true /* sync */, \"Cumulative count of stuck tasks detected\")\n+var (\n+ stuckStartup = metric.MustCreateNewUint64Metric(\"/watchdog/stuck_startup_detected\", true /* sync */, \"Incremented once on startup watchdog timeout\")\n+ stuckTasks = metric.MustCreateNewUint64Metric(\"/watchdog/stuck_tasks_detected\", true /* sync */, \"Cumulative count of stuck tasks detected\")\n+)\n// Amount of time to wait before dumping the stack to the log again when the same task(s) remains stuck.\nvar stackDumpSameTaskPeriod = time.Minute\n@@ -220,6 +223,9 @@ func (w *Watchdog) waitForStart() {\n// We are fine.\nreturn\n}\n+\n+ stuckStartup.Increment()\n+\nvar buf bytes.Buffer\nbuf.WriteString(fmt.Sprintf(\"Watchdog.Start() not called within %s\", w.StartupTimeout))\nw.doAction(w.StartupTimeoutAction, false, &buf)\n" } ]
Go
Apache License 2.0
google/gvisor
Add metric for startup watchdog timeout PiperOrigin-RevId: 314570894
259,975
03.06.2020 14:54:46
25,200
d8d86f0f3afdf0d46a556e7925ed54c5f4dc0bbf
Add test for O_TRUNC b/36576592 calls out an edge case previously not supported by HostFS. HostFS is currently being removed, meaning gVisor supports this feature. Simply add the test to open_test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/open.cc", "new_path": "test/syscalls/linux/open.cc", "diff": "@@ -416,6 +416,29 @@ TEST_F(OpenTest, CanTruncateWriteOnlyNoReadPermission_NoRandomSave) {\nEXPECT_EQ(stat.st_size, 0);\n}\n+TEST_F(OpenTest, CanTruncateWithStrangePermissions) {\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, false));\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_READ_SEARCH, false));\n+ const DisableSave ds; // Permissions are dropped.\n+ std::string path = NewTempAbsPath();\n+ int fd;\n+ // Create a file without user permissions.\n+ EXPECT_THAT( // SAVE_BELOW\n+ fd = open(path.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 055),\n+ SyscallSucceeds());\n+ EXPECT_THAT(close(fd), SyscallSucceeds());\n+\n+ // Cannot open file because we are owner and have no permissions set.\n+ EXPECT_THAT(open(path.c_str(), O_RDONLY), SyscallFailsWithErrno(EACCES));\n+\n+ // We *can* chmod the file, because we are the owner.\n+ EXPECT_THAT(chmod(path.c_str(), 0755), SyscallSucceeds());\n+\n+ // Now we can open the file again.\n+ EXPECT_THAT(fd = open(path.c_str(), O_RDWR), SyscallSucceeds());\n+ EXPECT_THAT(close(fd), SyscallSucceeds());\n+}\n+\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Add test for O_TRUNC b/36576592 calls out an edge case previously not supported by HostFS. HostFS is currently being removed, meaning gVisor supports this feature. Simply add the test to open_test. PiperOrigin-RevId: 314610226
259,853
04.06.2020 17:16:57
25,200
9e66ac4c20dcfa2f4aaa4b149736f34f6b2bc451
test/syscall: run hostnet tests in separate network namespaces. A few tests use hard-coded port numbers, so we need to guarantee that these ports will not be used for something else.
[ { "change_type": "MODIFY", "old_path": "test/runner/defs.bzl", "new_path": "test/runner/defs.bzl", "diff": "@@ -93,6 +93,7 @@ def _syscall_test(\n# we figure out how to request ipv4 sockets on Guitar machines.\nif network == \"host\":\ntags.append(\"noguitar\")\n+ tags.append(\"block-network\")\n# Disable off-host networking.\ntags.append(\"requires-net:loopback\")\n" } ]
Go
Apache License 2.0
google/gvisor
test/syscall: run hostnet tests in separate network namespaces. A few tests use hard-coded port numbers, so we need to guarantee that these ports will not be used for something else.
259,853
04.06.2020 12:01:22
25,200
d61e88e342d74d306a654b85d5ed791078f96f0a
Remove gvisor/pkg/tmutex It isn't used.
[ { "change_type": "DELETE", "old_path": "pkg/tmutex/BUILD", "new_path": null, "diff": "-load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n-\n-package(licenses = [\"notice\"])\n-\n-go_library(\n- name = \"tmutex\",\n- srcs = [\"tmutex.go\"],\n- visibility = [\"//:sandbox\"],\n-)\n-\n-go_test(\n- name = \"tmutex_test\",\n- size = \"medium\",\n- srcs = [\"tmutex_test.go\"],\n- library = \":tmutex\",\n- deps = [\"//pkg/sync\"],\n-)\n" }, { "change_type": "DELETE", "old_path": "pkg/tmutex/tmutex.go", "new_path": null, "diff": "-// Copyright 2018 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-// Package tmutex provides the implementation of a mutex that implements an\n-// efficient TryLock function in addition to Lock and Unlock.\n-package tmutex\n-\n-import (\n- \"sync/atomic\"\n-)\n-\n-// Mutex is a mutual exclusion primitive that implements TryLock in addition\n-// to Lock and Unlock.\n-type Mutex struct {\n- v int32\n- ch chan struct{}\n-}\n-\n-// Init initializes the mutex.\n-func (m *Mutex) Init() {\n- m.v = 1\n- m.ch = make(chan struct{}, 1)\n-}\n-\n-// Lock acquires the mutex. If it is currently held by another goroutine, Lock\n-// will wait until it has a chance to acquire it.\n-func (m *Mutex) Lock() {\n- // Uncontended case.\n- if atomic.AddInt32(&m.v, -1) == 0 {\n- return\n- }\n-\n- for {\n- // Try to acquire the mutex again, at the same time making sure\n- // that m.v is negative, which indicates to the owner of the\n- // lock that it is contended, which will force it to try to wake\n- // someone up when it releases the mutex.\n- if v := atomic.LoadInt32(&m.v); v >= 0 && atomic.SwapInt32(&m.v, -1) == 1 {\n- return\n- }\n-\n- // Wait for the mutex to be released before trying again.\n- <-m.ch\n- }\n-}\n-\n-// TryLock attempts to acquire the mutex without blocking. 
If the mutex is\n-// currently held by another goroutine, it fails to acquire it and returns\n-// false.\n-func (m *Mutex) TryLock() bool {\n- v := atomic.LoadInt32(&m.v)\n- if v <= 0 {\n- return false\n- }\n- return atomic.CompareAndSwapInt32(&m.v, 1, 0)\n-}\n-\n-// Unlock releases the mutex.\n-func (m *Mutex) Unlock() {\n- if atomic.SwapInt32(&m.v, 1) == 0 {\n- // There were no pending waiters.\n- return\n- }\n-\n- // Wake some waiter up.\n- select {\n- case m.ch <- struct{}{}:\n- default:\n- }\n-}\n" }, { "change_type": "DELETE", "old_path": "pkg/tmutex/tmutex_test.go", "new_path": null, "diff": "-// Copyright 2018 The gVisor Authors.\n-//\n-// Licensed under the Apache License, Version 2.0 (the \"License\");\n-// you may not use this file except in compliance with the License.\n-// You may obtain a copy of the License at\n-//\n-// http://www.apache.org/licenses/LICENSE-2.0\n-//\n-// Unless required by applicable law or agreed to in writing, software\n-// distributed under the License is distributed on an \"AS IS\" BASIS,\n-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-// See the License for the specific language governing permissions and\n-// limitations under the License.\n-\n-package tmutex\n-\n-import (\n- \"fmt\"\n- \"runtime\"\n- \"sync/atomic\"\n- \"testing\"\n- \"time\"\n-\n- \"gvisor.dev/gvisor/pkg/sync\"\n-)\n-\n-func TestBasicLock(t *testing.T) {\n- var m Mutex\n- m.Init()\n-\n- m.Lock()\n-\n- // Try blocking lock the mutex from a different goroutine. This must\n- // not block because the mutex is held.\n- ch := make(chan struct{}, 1)\n- go func() {\n- m.Lock()\n- ch <- struct{}{}\n- m.Unlock()\n- ch <- struct{}{}\n- }()\n-\n- select {\n- case <-ch:\n- t.Fatalf(\"Lock succeeded on locked mutex\")\n- case <-time.After(100 * time.Millisecond):\n- }\n-\n- // Unlock the mutex and make sure that the goroutine waiting on Lock()\n- // unblocks and succeeds.\n- m.Unlock()\n-\n- select {\n- case <-ch:\n- case <-time.After(100 * time.Millisecond):\n- t.Fatalf(\"Lock failed to acquire unlocked mutex\")\n- }\n-\n- // Make sure we can lock and unlock again.\n- m.Lock()\n- m.Unlock()\n-}\n-\n-func TestTryLock(t *testing.T) {\n- var m Mutex\n- m.Init()\n-\n- // Try to lock. It should succeed.\n- if !m.TryLock() {\n- t.Fatalf(\"TryLock failed on unlocked mutex\")\n- }\n-\n- // Try to lock again, it should now fail.\n- if m.TryLock() {\n- t.Fatalf(\"TryLock succeeded on locked mutex\")\n- }\n-\n- // Try blocking lock the mutex from a different goroutine. 
This must\n- // not block because the mutex is held.\n- ch := make(chan struct{}, 1)\n- go func() {\n- m.Lock()\n- ch <- struct{}{}\n- m.Unlock()\n- }()\n-\n- select {\n- case <-ch:\n- t.Fatalf(\"Lock succeeded on locked mutex\")\n- case <-time.After(100 * time.Millisecond):\n- }\n-\n- // Unlock the mutex and make sure that the goroutine waiting on Lock()\n- // unblocks and succeeds.\n- m.Unlock()\n-\n- select {\n- case <-ch:\n- case <-time.After(100 * time.Millisecond):\n- t.Fatalf(\"Lock failed to acquire unlocked mutex\")\n- }\n-}\n-\n-func TestMutualExclusion(t *testing.T) {\n- var m Mutex\n- m.Init()\n-\n- // Test mutual exclusion by running \"gr\" goroutines concurrently, and\n- // have each one increment a counter \"iters\" times within the critical\n- // section established by the mutex.\n- //\n- // If at the end the counter is not gr * iters, then we know that\n- // goroutines ran concurrently within the critical section.\n- //\n- // If one of the goroutines doesn't complete, it's likely a bug that\n- // causes to it to wait forever.\n- const gr = 1000\n- const iters = 100000\n- v := 0\n- var wg sync.WaitGroup\n- for i := 0; i < gr; i++ {\n- wg.Add(1)\n- go func() {\n- for j := 0; j < iters; j++ {\n- m.Lock()\n- v++\n- m.Unlock()\n- }\n- wg.Done()\n- }()\n- }\n-\n- wg.Wait()\n-\n- if v != gr*iters {\n- t.Fatalf(\"Bad count: got %v, want %v\", v, gr*iters)\n- }\n-}\n-\n-func TestMutualExclusionWithTryLock(t *testing.T) {\n- var m Mutex\n- m.Init()\n-\n- // Similar to the previous, with the addition of some goroutines that\n- // only increment the count if TryLock succeeds.\n- const gr = 1000\n- const iters = 100000\n- total := int64(gr * iters)\n- var tryTotal int64\n- v := int64(0)\n- var wg sync.WaitGroup\n- for i := 0; i < gr; i++ {\n- wg.Add(2)\n- go func() {\n- for j := 0; j < iters; j++ {\n- m.Lock()\n- v++\n- m.Unlock()\n- }\n- wg.Done()\n- }()\n- go func() {\n- local := int64(0)\n- for j := 0; j < iters; j++ {\n- if m.TryLock() {\n- v++\n- m.Unlock()\n- local++\n- }\n- }\n- atomic.AddInt64(&tryTotal, local)\n- wg.Done()\n- }()\n- }\n-\n- wg.Wait()\n-\n- t.Logf(\"tryTotal = %d\", tryTotal)\n- total += tryTotal\n-\n- if v != total {\n- t.Fatalf(\"Bad count: got %v, want %v\", v, total)\n- }\n-}\n-\n-// BenchmarkTmutex is equivalent to TestMutualExclusion, with the following\n-// differences:\n-//\n-// - The number of goroutines is variable, with the maximum value depending on\n-// GOMAXPROCS.\n-//\n-// - The number of iterations per benchmark is controlled by the benchmarking\n-// framework.\n-//\n-// - Care is taken to ensure that all goroutines participating in the benchmark\n-// have been created before the benchmark begins.\n-func BenchmarkTmutex(b *testing.B) {\n- for n, max := 1, 4*runtime.GOMAXPROCS(0); n > 0 && n <= max; n *= 2 {\n- b.Run(fmt.Sprintf(\"%d\", n), func(b *testing.B) {\n- var m Mutex\n- m.Init()\n-\n- var ready sync.WaitGroup\n- begin := make(chan struct{})\n- var end sync.WaitGroup\n- for i := 0; i < n; i++ {\n- ready.Add(1)\n- end.Add(1)\n- go func() {\n- ready.Done()\n- <-begin\n- for j := 0; j < b.N; j++ {\n- m.Lock()\n- m.Unlock()\n- }\n- end.Done()\n- }()\n- }\n-\n- ready.Wait()\n- b.ResetTimer()\n- close(begin)\n- end.Wait()\n- })\n- }\n-}\n-\n-// BenchmarkSyncMutex is equivalent to BenchmarkTmutex, but uses sync.Mutex as\n-// a comparison point.\n-func BenchmarkSyncMutex(b *testing.B) {\n- for n, max := 1, 4*runtime.GOMAXPROCS(0); n > 0 && n <= max; n *= 2 {\n- b.Run(fmt.Sprintf(\"%d\", n), func(b *testing.B) {\n- var m sync.Mutex\n-\n- var ready 
sync.WaitGroup\n- begin := make(chan struct{})\n- var end sync.WaitGroup\n- for i := 0; i < n; i++ {\n- ready.Add(1)\n- end.Add(1)\n- go func() {\n- ready.Done()\n- <-begin\n- for j := 0; j < b.N; j++ {\n- m.Lock()\n- m.Unlock()\n- }\n- end.Done()\n- }()\n- }\n-\n- ready.Wait()\n- b.ResetTimer()\n- close(begin)\n- end.Wait()\n- })\n- }\n-}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove gvisor/pkg/tmutex It isn't used. PiperOrigin-RevId: 314775492
260,003
04.06.2020 15:38:33
25,200
41da7a568b1e4f46b3bc09724996556fb18b4d16
Fix copylocks error about copying IPTables. IPTables.connections contains a sync.RWMutex. Copying it will trigger copylocks analysis. Tested by manually enabling nogo tests. sync.RWMutex is added to IPTables for the additional race condition discovered.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/netfilter.go", "new_path": "pkg/sentry/socket/netfilter/netfilter.go", "diff": "@@ -144,31 +144,27 @@ func GetEntries(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen\n}\nfunc findTable(stk *stack.Stack, tablename linux.TableName) (stack.Table, error) {\n- ipt := stk.IPTables()\n- table, ok := ipt.Tables[tablename.String()]\n+ table, ok := stk.IPTables().GetTable(tablename.String())\nif !ok {\nreturn stack.Table{}, fmt.Errorf(\"couldn't find table %q\", tablename)\n}\nreturn table, nil\n}\n-// FillDefaultIPTables sets stack's IPTables to the default tables and\n-// populates them with metadata.\n-func FillDefaultIPTables(stk *stack.Stack) {\n- ipt := stack.DefaultTables()\n-\n+// FillIPTablesMetadata populates stack's IPTables with metadata.\n+func FillIPTablesMetadata(stk *stack.Stack) {\n+ stk.IPTables().ModifyTables(func(tables map[string]stack.Table) {\n// In order to fill in the metadata, we have to translate ipt from its\n// netstack format to Linux's giant-binary-blob format.\n- for name, table := range ipt.Tables {\n+ for name, table := range tables {\n_, metadata, err := convertNetstackToBinary(name, table)\nif err != nil {\npanic(fmt.Errorf(\"Unable to set default IP tables: %v\", err))\n}\ntable.SetMetadata(metadata)\n- ipt.Tables[name] = table\n+ tables[name] = table\n}\n-\n- stk.SetIPTables(ipt)\n+ })\n}\n// convertNetstackToBinary converts the iptables as stored in netstack to the\n@@ -573,15 +569,13 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n// - There are no chains without an unconditional final rule.\n// - There are no chains without an unconditional underflow rule.\n- ipt := stk.IPTables()\ntable.SetMetadata(metadata{\nHookEntry: replace.HookEntry,\nUnderflow: replace.Underflow,\nNumEntries: replace.NumEntries,\nSize: replace.Size,\n})\n- ipt.Tables[replace.Name.String()] = table\n- stk.SetIPTables(ipt)\n+ stk.IPTables().ReplaceTable(replace.Name.String(), table)\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/stack.go", "new_path": "pkg/sentry/socket/netstack/stack.go", "diff": "@@ -362,14 +362,13 @@ func (s *Stack) RouteTable() []inet.Route {\n}\n// IPTables returns the stack's iptables.\n-func (s *Stack) IPTables() (stack.IPTables, error) {\n+func (s *Stack) IPTables() (*stack.IPTables, error) {\nreturn s.Stack.IPTables(), nil\n}\n-// FillDefaultIPTables sets the stack's iptables to the default tables, which\n-// allow and do not modify all traffic.\n-func (s *Stack) FillDefaultIPTables() {\n- netfilter.FillDefaultIPTables(s.Stack)\n+// FillIPTablesMetadata populates stack's IPTables with metadata.\n+func (s *Stack) FillIPTablesMetadata() {\n+ netfilter.FillIPTablesMetadata(s.Stack)\n}\n// Resume implements inet.Stack.Resume.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables.go", "new_path": "pkg/tcpip/stack/iptables.go", "diff": "@@ -43,11 +43,11 @@ const HookUnset = -1\n// DefaultTables returns a default set of tables. 
Each chain is set to accept\n// all packets.\n-func DefaultTables() IPTables {\n+func DefaultTables() *IPTables {\n// TODO(gvisor.dev/issue/170): We may be able to swap out some strings for\n// iotas.\n- return IPTables{\n- Tables: map[string]Table{\n+ return &IPTables{\n+ tables: map[string]Table{\nTablenameNat: Table{\nRules: []Rule{\nRule{Target: AcceptTarget{}},\n@@ -106,7 +106,7 @@ func DefaultTables() IPTables {\nUserChains: map[string]int{},\n},\n},\n- Priorities: map[Hook][]string{\n+ priorities: map[Hook][]string{\nInput: []string{TablenameNat, TablenameFilter},\nPrerouting: []string{TablenameMangle, TablenameNat},\nOutput: []string{TablenameMangle, TablenameNat, TablenameFilter},\n@@ -158,6 +158,36 @@ func EmptyNatTable() Table {\n}\n}\n+// GetTable returns table by name.\n+func (it *IPTables) GetTable(name string) (Table, bool) {\n+ it.mu.RLock()\n+ defer it.mu.RUnlock()\n+ t, ok := it.tables[name]\n+ return t, ok\n+}\n+\n+// ReplaceTable replaces or inserts table by name.\n+func (it *IPTables) ReplaceTable(name string, table Table) {\n+ it.mu.Lock()\n+ defer it.mu.Unlock()\n+ it.tables[name] = table\n+}\n+\n+// ModifyTables acquires write-lock and calls fn with internal name-to-table\n+// map. This function can be used to update multiple tables atomically.\n+func (it *IPTables) ModifyTables(fn func(map[string]Table)) {\n+ it.mu.Lock()\n+ defer it.mu.Unlock()\n+ fn(it.tables)\n+}\n+\n+// GetPriorities returns slice of priorities associated with hook.\n+func (it *IPTables) GetPriorities(hook Hook) []string {\n+ it.mu.RLock()\n+ defer it.mu.RUnlock()\n+ return it.priorities[hook]\n+}\n+\n// A chainVerdict is what a table decides should be done with a packet.\ntype chainVerdict int\n@@ -184,8 +214,8 @@ func (it *IPTables) Check(hook Hook, pkt *PacketBuffer, gso *GSO, r *Route, addr\nit.connections.HandlePacket(pkt, hook, gso, r)\n// Go through each table containing the hook.\n- for _, tablename := range it.Priorities[hook] {\n- table := it.Tables[tablename]\n+ for _, tablename := range it.GetPriorities(hook) {\n+ table, _ := it.GetTable(tablename)\nruleIdx := table.BuiltinChains[hook]\nswitch verdict := it.checkChain(hook, pkt, table, ruleIdx, gso, r, address, nicName); verdict {\n// If the table returns Accept, move on to the next table.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables_types.go", "new_path": "pkg/tcpip/stack/iptables_types.go", "diff": "@@ -16,6 +16,7 @@ package stack\nimport (\n\"strings\"\n+ \"sync\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n@@ -78,13 +79,17 @@ const (\n// IPTables holds all the tables for a netstack.\ntype IPTables struct {\n- // Tables maps table names to tables. User tables have arbitrary names.\n- Tables map[string]Table\n+ // mu protects tables and priorities.\n+ mu sync.RWMutex\n- // Priorities maps each hook to a list of table names. The order of the\n+ // tables maps table names to tables. User tables have arbitrary names. mu\n+ // needs to be locked for accessing.\n+ tables map[string]Table\n+\n+ // priorities maps each hook to a list of table names. The order of the\n// list is the order in which each table should be visited for that\n- // hook.\n- Priorities map[Hook][]string\n+ // hook. 
mu needs to be locked for accessing.\n+ priorities map[Hook][]string\nconnections ConnTrackTable\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -424,12 +424,8 @@ type Stack struct {\n// handleLocal allows non-loopback interfaces to loop packets.\nhandleLocal bool\n- // tablesMu protects iptables.\n- tablesMu sync.RWMutex\n-\n- // tables are the iptables packet filtering and manipulation rules. The are\n- // protected by tablesMu.`\n- tables IPTables\n+ // tables are the iptables packet filtering and manipulation rules.\n+ tables *IPTables\n// resumableEndpoints is a list of endpoints that need to be resumed if the\n// stack is being restored.\n@@ -676,6 +672,7 @@ func New(opts Options) *Stack {\nclock: clock,\nstats: opts.Stats.FillIn(),\nhandleLocal: opts.HandleLocal,\n+ tables: DefaultTables(),\nicmpRateLimiter: NewICMPRateLimiter(),\nseed: generateRandUint32(),\nndpConfigs: opts.NDPConfigs,\n@@ -1741,18 +1738,8 @@ func (s *Stack) IsInGroup(nicID tcpip.NICID, multicastAddr tcpip.Address) (bool,\n}\n// IPTables returns the stack's iptables.\n-func (s *Stack) IPTables() IPTables {\n- s.tablesMu.RLock()\n- t := s.tables\n- s.tablesMu.RUnlock()\n- return t\n-}\n-\n-// SetIPTables sets the stack's iptables.\n-func (s *Stack) SetIPTables(ipt IPTables) {\n- s.tablesMu.Lock()\n- s.tables = ipt\n- s.tablesMu.Unlock()\n+func (s *Stack) IPTables() *IPTables {\n+ return s.tables\n}\n// ICMPLimit returns the maximum number of ICMP messages that can be sent\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/icmp/endpoint.go", "new_path": "pkg/tcpip/transport/icmp/endpoint.go", "diff": "@@ -140,11 +140,6 @@ func (e *endpoint) SetOwner(owner tcpip.PacketOwner) {\ne.owner = owner\n}\n-// IPTables implements tcpip.Endpoint.IPTables.\n-func (e *endpoint) IPTables() (stack.IPTables, error) {\n- return e.stack.IPTables(), nil\n-}\n-\n// Read reads data from the endpoint. 
This method does not block if\n// there is no data pending.\nfunc (e *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/packet/endpoint.go", "new_path": "pkg/tcpip/transport/packet/endpoint.go", "diff": "@@ -132,11 +132,6 @@ func (ep *endpoint) Close() {\n// ModerateRecvBuf implements tcpip.Endpoint.ModerateRecvBuf.\nfunc (ep *endpoint) ModerateRecvBuf(copied int) {}\n-// IPTables implements tcpip.Endpoint.IPTables.\n-func (ep *endpoint) IPTables() (stack.IPTables, error) {\n- return ep.stack.IPTables(), nil\n-}\n-\n// Read implements tcpip.Endpoint.Read.\nfunc (ep *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\nep.rcvMu.Lock()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/raw/endpoint.go", "new_path": "pkg/tcpip/transport/raw/endpoint.go", "diff": "@@ -166,11 +166,6 @@ func (e *endpoint) SetOwner(owner tcpip.PacketOwner) {\ne.owner = owner\n}\n-// IPTables implements tcpip.Endpoint.IPTables.\n-func (e *endpoint) IPTables() (stack.IPTables, error) {\n- return e.stack.IPTables(), nil\n-}\n-\n// Read implements tcpip.Endpoint.Read.\nfunc (e *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\nif !e.associated {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -1172,11 +1172,6 @@ func (e *endpoint) SetOwner(owner tcpip.PacketOwner) {\ne.owner = owner\n}\n-// IPTables implements tcpip.Endpoint.IPTables.\n-func (e *endpoint) IPTables() (stack.IPTables, error) {\n- return e.stack.IPTables(), nil\n-}\n-\n// Read reads data from the endpoint.\nfunc (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\ne.LockUser()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/udp/endpoint.go", "new_path": "pkg/tcpip/transport/udp/endpoint.go", "diff": "@@ -247,11 +247,6 @@ func (e *endpoint) Close() {\n// ModerateRecvBuf implements tcpip.Endpoint.ModerateRecvBuf.\nfunc (e *endpoint) ModerateRecvBuf(copied int) {}\n-// IPTables implements tcpip.Endpoint.IPTables.\n-func (e *endpoint) IPTables() (stack.IPTables, error) {\n- return e.stack.IPTables(), nil\n-}\n-\n// Read reads data from the endpoint. This method does not block if\n// there is no data pending.\nfunc (e *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, *tcpip.Error) {\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -1056,7 +1056,7 @@ func newEmptySandboxNetworkStack(clock tcpip.Clock, uniqueID stack.UniqueID) (in\nreturn nil, fmt.Errorf(\"SetTransportProtocolOption failed: %v\", err)\n}\n- s.FillDefaultIPTables()\n+ s.FillIPTablesMetadata()\nreturn &s, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix copylocks error about copying IPTables. IPTables.connections contains a sync.RWMutex. Copying it will trigger copylocks analysis. Tested by manually enabling nogo tests. sync.RWMutex is added to IPTables for the additional race condition discovered. PiperOrigin-RevId: 314817019
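The accessor pattern adopted here is worth spelling out: a struct that embeds sync.RWMutex must not be copied (go vet's copylocks check flags it), so callers receive a pointer and go through lock-guarded getters and setters. A simplified sketch, with a string-valued table map standing in for stack.Table:

```go
package main

import (
	"fmt"
	"sync"
)

// IPTables embeds a sync.RWMutex, so returning or assigning it by value
// would copy the lock and trip copylocks. Callers instead hold *IPTables
// and use accessors that take the lock.
type IPTables struct {
	mu     sync.RWMutex
	tables map[string]string
}

func (it *IPTables) GetTable(name string) (string, bool) {
	it.mu.RLock()
	defer it.mu.RUnlock()
	t, ok := it.tables[name]
	return t, ok
}

func (it *IPTables) ReplaceTable(name, t string) {
	it.mu.Lock()
	defer it.mu.Unlock()
	it.tables[name] = t
}

func main() {
	it := &IPTables{tables: map[string]string{}}
	it.ReplaceTable("filter", "accept-all")
	t, _ := it.GetTable("filter")
	fmt.Println(t)
}
```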
259,985
05.06.2020 14:26:16
25,200
6d9a68ca41eb8470dd492624916fcf50382050fa
Centralize the categories of endpoint states.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -63,7 +63,8 @@ const (\nStateClosing\n)\n-// connected is the set of states where an endpoint is connected to a peer.\n+// connected returns true when s is one of the states representing an\n+// endpoint connected to a peer.\nfunc (s EndpointState) connected() bool {\nswitch s {\ncase StateEstablished, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing:\n@@ -73,6 +74,40 @@ func (s EndpointState) connected() bool {\n}\n}\n+// connecting returns true when s is one of the states representing a\n+// connection in progress, but not yet fully established.\n+func (s EndpointState) connecting() bool {\n+ switch s {\n+ case StateConnecting, StateSynSent, StateSynRecv:\n+ return true\n+ default:\n+ return false\n+ }\n+}\n+\n+// handshake returns true when s is one of the states representing an endpoint\n+// in the middle of a TCP handshake.\n+func (s EndpointState) handshake() bool {\n+ switch s {\n+ case StateSynSent, StateSynRecv:\n+ return true\n+ default:\n+ return false\n+ }\n+}\n+\n+// closed returns true when s is one of the states an endpoint transitions to\n+// when closed or when it encounters an error. This is distinct from a newly\n+// initialized endpoint that was never connected.\n+func (s EndpointState) closed() bool {\n+ switch s {\n+ case StateClose, StateError:\n+ return true\n+ default:\n+ return false\n+ }\n+}\n+\n// String implements fmt.Stringer.String.\nfunc (s EndpointState) String() string {\nswitch s {\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint_state.go", "new_path": "pkg/tcpip/transport/tcp/endpoint_state.go", "diff": "@@ -49,11 +49,10 @@ func (e *endpoint) beforeSave() {\ne.mu.Lock()\ndefer e.mu.Unlock()\n- switch e.EndpointState() {\n- case StateInitial, StateBound:\n- // TODO(b/138137272): this enumeration duplicates\n- // EndpointState.connected. 
remove it.\n- case StateEstablished, StateSynSent, StateSynRecv, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing:\n+ epState := e.EndpointState()\n+ switch {\n+ case epState == StateInitial || epState == StateBound:\n+ case epState.connected() || epState.handshake():\nif e.route.Capabilities()&stack.CapabilitySaveRestore == 0 {\nif e.route.Capabilities()&stack.CapabilityDisconnectOk == 0 {\npanic(tcpip.ErrSaveRejection{fmt.Errorf(\"endpoint cannot be saved in connected state: local %v:%d, remote %v:%d\", e.ID.LocalAddress, e.ID.LocalPort, e.ID.RemoteAddress, e.ID.RemotePort)})\n@@ -69,15 +68,16 @@ func (e *endpoint) beforeSave() {\nbreak\n}\nfallthrough\n- case StateListen, StateConnecting:\n+ case epState == StateListen || epState == StateConnecting:\ne.drainSegmentLocked()\n- if e.EndpointState() != StateClose && e.EndpointState() != StateError {\n+ // Refresh epState, since drainSegmentLocked may have changed it.\n+ epState = e.EndpointState()\n+ if !epState.closed() {\nif !e.workerRunning {\npanic(\"endpoint has no worker running in listen, connecting, or connected state\")\n}\n- break\n}\n- case StateError, StateClose:\n+ case epState.closed():\nfor e.workerRunning {\ne.mu.Unlock()\ntime.Sleep(100 * time.Millisecond)\n@@ -148,23 +148,23 @@ var connectingLoading sync.WaitGroup\n// Bound endpoint loading happens last.\n// loadState is invoked by stateify.\n-func (e *endpoint) loadState(state EndpointState) {\n+func (e *endpoint) loadState(epState EndpointState) {\n// This is to ensure that the loading wait groups include all applicable\n// endpoints before any asynchronous calls to the Wait() methods.\n// For restore purposes we treat TimeWait like a connected endpoint.\n- if state.connected() || state == StateTimeWait {\n+ if epState.connected() || epState == StateTimeWait {\nconnectedLoading.Add(1)\n}\n- switch state {\n- case StateListen:\n+ switch {\n+ case epState == StateListen:\nlistenLoading.Add(1)\n- case StateConnecting, StateSynSent, StateSynRecv:\n+ case epState.connecting():\nconnectingLoading.Add(1)\n}\n// Directly update the state here rather than using e.setEndpointState\n// as the endpoint is still being loaded and the stack reference is not\n// yet initialized.\n- atomic.StoreUint32((*uint32)(&e.state), uint32(state))\n+ atomic.StoreUint32((*uint32)(&e.state), uint32(epState))\n}\n// afterLoad is invoked by stateify.\n@@ -183,8 +183,8 @@ func (e *endpoint) afterLoad() {\nfunc (e *endpoint) Resume(s *stack.Stack) {\ne.stack = s\ne.segmentQueue.setLimit(MaxUnprocessedSegments)\n- state := e.origEndpointState\n- switch state {\n+ epState := e.origEndpointState\n+ switch epState {\ncase StateInitial, StateBound, StateListen, StateConnecting, StateEstablished:\nvar ss SendBufferSizeOption\nif err := e.stack.TransportProtocolOption(ProtocolNumber, &ss); err == nil {\n@@ -208,8 +208,8 @@ func (e *endpoint) Resume(s *stack.Stack) {\n}\n}\n- switch state {\n- case StateEstablished, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing:\n+ switch {\n+ case epState.connected():\nbind()\nif len(e.connectingAddress) == 0 {\ne.connectingAddress = e.ID.RemoteAddress\n@@ -232,13 +232,13 @@ func (e *endpoint) Resume(s *stack.Stack) {\nclosed := e.closed\ne.mu.Unlock()\ne.notifyProtocolGoroutine(notifyTickleWorker)\n- if state == StateFinWait2 && closed {\n+ if epState == StateFinWait2 && closed {\n// If the endpoint has been closed then make sure we notify so\n// that the FIN_WAIT2 timer is started after a 
restore.\ne.notifyProtocolGoroutine(notifyClose)\n}\nconnectedLoading.Done()\n- case StateListen:\n+ case epState == StateListen:\ntcpip.AsyncLoading.Add(1)\ngo func() {\nconnectedLoading.Wait()\n@@ -255,7 +255,7 @@ func (e *endpoint) Resume(s *stack.Stack) {\nlistenLoading.Done()\ntcpip.AsyncLoading.Done()\n}()\n- case StateConnecting, StateSynSent, StateSynRecv:\n+ case epState.connecting():\ntcpip.AsyncLoading.Add(1)\ngo func() {\nconnectedLoading.Wait()\n@@ -267,7 +267,7 @@ func (e *endpoint) Resume(s *stack.Stack) {\nconnectingLoading.Done()\ntcpip.AsyncLoading.Done()\n}()\n- case StateBound:\n+ case epState == StateBound:\ntcpip.AsyncLoading.Add(1)\ngo func() {\nconnectedLoading.Wait()\n@@ -276,7 +276,7 @@ func (e *endpoint) Resume(s *stack.Stack) {\nbind()\ntcpip.AsyncLoading.Done()\n}()\n- case StateClose:\n+ case epState == StateClose:\nif e.isPortReserved {\ntcpip.AsyncLoading.Add(1)\ngo func() {\n@@ -291,12 +291,11 @@ func (e *endpoint) Resume(s *stack.Stack) {\ne.state = StateClose\ne.stack.CompleteTransportEndpointCleanup(e)\ntcpip.DeleteDanglingEndpoint(e)\n- case StateError:\n+ case epState == StateError:\ne.state = StateError\ne.stack.CompleteTransportEndpointCleanup(e)\ntcpip.DeleteDanglingEndpoint(e)\n}\n-\n}\n// saveLastError is invoked by stateify.\n" } ]
Go
Apache License 2.0
google/gvisor
Centralize the categories of endpoint states. PiperOrigin-RevId: 314996457
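A brief sketch of the call-site style this refactor enables: switching on state categories instead of enumerating individual states. The state set below is abbreviated and hypothetical, not the actual tcp.EndpointState values.

```go
package main

import "fmt"

type state int

const (
	stateSynSent state = iota
	stateSynRecv
	stateEstablished
	stateFinWait1
	stateClose
	stateError
)

func (s state) connected() bool { return s == stateEstablished || s == stateFinWait1 }
func (s state) handshake() bool { return s == stateSynSent || s == stateSynRecv }
func (s state) closed() bool    { return s == stateClose || s == stateError }

// describe shows the centralized style: a switch over categories rather
// than long lists of individual states at every call site.
func describe(s state) string {
	switch {
	case s.connected():
		return "connected"
	case s.handshake():
		return "handshaking"
	case s.closed():
		return "closed"
	default:
		return "other"
	}
}

func main() { fmt.Println(describe(stateSynRecv)) }
```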
259,853
05.06.2020 14:43:56
25,200
8c1f5b5cd8b634a5e7255944f42e82c5c9de3149
Unshare files on exec The current task can share its fdtable with a few other tasks, but after exec, this should be a completely separate process.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/task_exec.go", "new_path": "pkg/sentry/kernel/task_exec.go", "diff": "@@ -198,6 +198,10 @@ func (r *runSyscallAfterExecStop) execute(t *Task) taskRunState {\nt.tg.oldRSeqCritical.Store(&OldRSeqCriticalRegion{})\nt.tg.pidns.owner.mu.Unlock()\n+ oldFDTable := t.fdTable\n+ t.fdTable = t.fdTable.Fork()\n+ oldFDTable.DecRef()\n+\n// Remove FDs with the CloseOnExec flag set.\nt.fdTable.RemoveIf(func(_ *fs.File, _ *vfs.FileDescription, flags FDFlags) bool {\nreturn flags.CloseOnExec\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/exec.cc", "new_path": "test/syscalls/linux/exec.cc", "diff": "@@ -673,6 +673,33 @@ TEST(ExecveatTest, SymlinkNoFollowWithRelativePath) {\nEXPECT_EQ(execve_errno, ELOOP);\n}\n+TEST(ExecveatTest, UnshareFiles) {\n+ TempPath tempFile = ASSERT_NO_ERRNO_AND_VALUE(\n+ TempPath::CreateFileWith(GetAbsoluteTestTmpdir(), \"bar\", 0755));\n+ const FileDescriptor fd_closed_on_exec =\n+ ASSERT_NO_ERRNO_AND_VALUE(Open(tempFile.path(), O_RDONLY | O_CLOEXEC));\n+\n+ pid_t child;\n+ EXPECT_THAT(child = syscall(__NR_clone, SIGCHLD | CLONE_VFORK | CLONE_FILES,\n+ 0, 0, 0, 0),\n+ SyscallSucceeds());\n+ if (child == 0) {\n+ ExecveArray argv = {\"test\"};\n+ ExecveArray envp;\n+ ASSERT_THAT(\n+ execve(RunfilePath(kBasicWorkload).c_str(), argv.get(), envp.get()),\n+ SyscallSucceeds());\n+ _exit(1);\n+ }\n+\n+ int status;\n+ ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());\n+ EXPECT_EQ(status, 0);\n+\n+ struct stat st;\n+ EXPECT_THAT(fstat(fd_closed_on_exec.get(), &st), SyscallSucceeds());\n+}\n+\nTEST(ExecveatTest, SymlinkNoFollowWithAbsolutePath) {\nstd::string parent_dir = \"/tmp\";\nTempPath link = ASSERT_NO_ERRNO_AND_VALUE(\n" } ]
Go
Apache License 2.0
google/gvisor
Unshare files on exec The current task can share its fdtable with a few other tasks, but after exec, this should be a completely separate process. PiperOrigin-RevId: 314999565
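The unshare step amounts to copy-then-release: fork a private descriptor table and drop the reference to the shared one, so close-on-exec removal no longer affects a CLONE_FILES sibling. A rough sketch with a toy table (the real kernel.FDTable carries reference counting and locking):

```go
package main

import "fmt"

type fdTable struct{ fds map[int]string }

// fork returns a private copy so later mutations no longer affect tasks
// that shared the original table (e.g. a CLONE_FILES sibling).
func (t *fdTable) fork() *fdTable {
	c := &fdTable{fds: map[int]string{}}
	for fd, f := range t.fds {
		c.fds[fd] = f
	}
	return c
}

func main() {
	shared := &fdTable{fds: map[int]string{3: "log", 4: "sock (close-on-exec)"}}
	private := shared.fork() // step added at exec time
	delete(private.fds, 4)   // close-on-exec removal touches only the copy
	fmt.Println(len(shared.fds), len(private.fds)) // 2 1
}
```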
259,858
05.06.2020 16:37:24
25,200
f385e581a51e7f9800fe7b41835d58e45e29aefa
Drop flaky tag.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/adapters/gonet/BUILD", "new_path": "pkg/tcpip/adapters/gonet/BUILD", "diff": "@@ -22,7 +22,6 @@ go_test(\nsize = \"small\",\nsrcs = [\"gonet_test.go\"],\nlibrary = \":gonet\",\n- tags = [\"flaky\"],\ndeps = [\n\"//pkg/tcpip\",\n\"//pkg/tcpip/header\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/BUILD", "new_path": "pkg/tcpip/transport/tcp/BUILD", "diff": "@@ -86,10 +86,6 @@ go_test(\n\"tcp_test.go\",\n\"tcp_timestamp_test.go\",\n],\n- # FIXME(b/68809571)\n- tags = [\n- \"flaky\",\n- ],\ndeps = [\n\":tcp\",\n\"//pkg/sync\",\n" } ]
Go
Apache License 2.0
google/gvisor
Drop flaky tag. PiperOrigin-RevId: 315018295
259,891
03.06.2020 19:57:39
25,200
74a7d76c9777820fcd7bd6002481eb959f58e247
iptables: loopback traffic skips prerouting chain Loopback traffic is not affected by rules in the PREROUTING chain. This change is also necessary for istio's envoy to talk to other components in the same pod.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/network/ipv4/ipv4.go", "new_path": "pkg/tcpip/network/ipv4/ipv4.go", "diff": "@@ -258,38 +258,24 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, params stack.Netw\nreturn nil\n}\n- if pkt.NatDone {\n// If the packet is manipulated as per NAT Ouput rules, handle packet\n// based on destination address and do not send the packet to link layer.\n+ // TODO(gvisor.dev/issue/170): We should do this for every packet, rather than\n+ // only NATted packets, but removing this check short circuits broadcasts\n+ // before they are sent out to other hosts.\n+ if pkt.NatDone {\nnetHeader := header.IPv4(pkt.NetworkHeader)\nep, err := e.stack.FindNetworkEndpoint(header.IPv4ProtocolNumber, netHeader.DestinationAddress())\nif err == nil {\n- src := netHeader.SourceAddress()\n- dst := netHeader.DestinationAddress()\n- route := r.ReverseRoute(src, dst)\n-\n- views := make([]buffer.View, 1, 1+len(pkt.Data.Views()))\n- views[0] = pkt.Header.View()\n- views = append(views, pkt.Data.Views()...)\n- ep.HandlePacket(&route, &stack.PacketBuffer{\n- Data: buffer.NewVectorisedView(len(views[0])+pkt.Data.Size(), views),\n- })\n+ route := r.ReverseRoute(netHeader.SourceAddress(), netHeader.DestinationAddress())\n+ handleLoopback(&route, pkt, ep)\nreturn nil\n}\n}\nif r.Loop&stack.PacketLoop != 0 {\n- // The inbound path expects the network header to still be in\n- // the PacketBuffer's Data field.\n- views := make([]buffer.View, 1, 1+len(pkt.Data.Views()))\n- views[0] = pkt.Header.View()\n- views = append(views, pkt.Data.Views()...)\nloopedR := r.MakeLoopedRoute()\n-\n- e.HandlePacket(&loopedR, &stack.PacketBuffer{\n- Data: buffer.NewVectorisedView(len(views[0])+pkt.Data.Size(), views),\n- })\n-\n+ handleLoopback(&loopedR, pkt, e)\nloopedR.Release()\n}\nif r.Loop&stack.PacketOut == 0 {\n@@ -305,6 +291,17 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, params stack.Netw\nreturn nil\n}\n+func handleLoopback(route *stack.Route, pkt *stack.PacketBuffer, ep stack.NetworkEndpoint) {\n+ // The inbound path expects the network header to still be in\n+ // the PacketBuffer's Data field.\n+ views := make([]buffer.View, 1, 1+len(pkt.Data.Views()))\n+ views[0] = pkt.Header.View()\n+ views = append(views, pkt.Data.Views()...)\n+ ep.HandlePacket(route, &stack.PacketBuffer{\n+ Data: buffer.NewVectorisedView(len(views[0])+pkt.Data.Size(), views),\n+ })\n+}\n+\n// WritePackets implements stack.NetworkEndpoint.WritePackets.\nfunc (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.PacketBufferList, params stack.NetworkHeaderParams) (int, *tcpip.Error) {\nif r.Loop&stack.PacketLoop != 0 {\n@@ -347,13 +344,7 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\nsrc := netHeader.SourceAddress()\ndst := netHeader.DestinationAddress()\nroute := r.ReverseRoute(src, dst)\n-\n- views := make([]buffer.View, 1, 1+len(pkt.Data.Views()))\n- views[0] = pkt.Header.View()\n- views = append(views, pkt.Data.Views()...)\n- ep.HandlePacket(&route, &stack.PacketBuffer{\n- Data: buffer.NewVectorisedView(len(views[0])+pkt.Data.Size(), views),\n- })\n+ handleLoopback(&route, pkt, ep)\nn++\ncontinue\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -1229,7 +1229,8 @@ func (n *NIC) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcp\n}\n// TODO(gvisor.dev/issue/170): Not supporting iptables for IPv6 yet.\n- if protocol == 
header.IPv4ProtocolNumber {\n+ // Loopback traffic skips the prerouting chain.\n+ if protocol == header.IPv4ProtocolNumber && !n.isLoopback() {\n// iptables filtering.\nipt := n.stack.IPTables()\naddress := n.primaryAddress(protocol)\n" }, { "change_type": "MODIFY", "old_path": "test/iptables/iptables_test.go", "new_path": "test/iptables/iptables_test.go", "diff": "@@ -303,6 +303,10 @@ func TestNATRedirectRequiresProtocol(t *testing.T) {\nsingleTest(t, NATRedirectRequiresProtocol{})\n}\n+func TestNATLoopbackSkipsPrerouting(t *testing.T) {\n+ singleTest(t, NATLoopbackSkipsPrerouting{})\n+}\n+\nfunc TestInputSource(t *testing.T) {\nsingleTest(t, FilterInputSource{})\n}\n" }, { "change_type": "MODIFY", "old_path": "test/iptables/nat.go", "new_path": "test/iptables/nat.go", "diff": "@@ -39,6 +39,7 @@ func init() {\nRegisterTestCase(NATOutDontRedirectIP{})\nRegisterTestCase(NATOutRedirectInvert{})\nRegisterTestCase(NATRedirectRequiresProtocol{})\n+ RegisterTestCase(NATLoopbackSkipsPrerouting{})\n}\n// NATPreRedirectUDPPort tests that packets are redirected to different port.\n@@ -326,32 +327,6 @@ func (NATRedirectRequiresProtocol) LocalAction(ip net.IP) error {\nreturn nil\n}\n-// loopbackTests runs an iptables rule and ensures that packets sent to\n-// dest:dropPort are received by localhost:acceptPort.\n-func loopbackTest(dest net.IP, args ...string) error {\n- if err := natTable(args...); err != nil {\n- return err\n- }\n- sendCh := make(chan error)\n- listenCh := make(chan error)\n- go func() {\n- sendCh <- sendUDPLoop(dest, dropPort, sendloopDuration)\n- }()\n- go func() {\n- listenCh <- listenUDP(acceptPort, sendloopDuration)\n- }()\n- select {\n- case err := <-listenCh:\n- if err != nil {\n- return err\n- }\n- case <-time.After(sendloopDuration):\n- return errors.New(\"timed out\")\n- }\n- // sendCh will always take the full sendloop time.\n- return <-sendCh\n-}\n-\n// NATOutRedirectTCPPort tests that connections are redirected on specified ports.\ntype NATOutRedirectTCPPort struct{}\n@@ -400,3 +375,65 @@ func (NATOutRedirectTCPPort) ContainerAction(ip net.IP) error {\nfunc (NATOutRedirectTCPPort) LocalAction(ip net.IP) error {\nreturn nil\n}\n+\n+// NATLoopbackSkipsPrerouting tests that packets sent via loopback aren't\n+// affected by PREROUTING rules.\n+type NATLoopbackSkipsPrerouting struct{}\n+\n+// Name implements TestCase.Name.\n+func (NATLoopbackSkipsPrerouting) Name() string {\n+ return \"NATLoopbackSkipsPrerouting\"\n+}\n+\n+// ContainerAction implements TestCase.ContainerAction.\n+func (NATLoopbackSkipsPrerouting) ContainerAction(ip net.IP) error {\n+ // Redirect anything sent to localhost to an unused port.\n+ dest := []byte{127, 0, 0, 1}\n+ if err := natTable(\"-A\", \"PREROUTING\", \"-p\", \"tcp\", \"-j\", \"REDIRECT\", \"--to-port\", fmt.Sprintf(\"%d\", dropPort)); err != nil {\n+ return err\n+ }\n+\n+ // Establish a connection via localhost. 
If the PREROUTING rule did apply to\n+ // loopback traffic, the connection would fail.\n+ sendCh := make(chan error)\n+ go func() {\n+ sendCh <- connectTCP(dest, acceptPort, sendloopDuration)\n+ }()\n+\n+ if err := listenTCP(acceptPort, sendloopDuration); err != nil {\n+ return err\n+ }\n+ return <-sendCh\n+}\n+\n+// LocalAction implements TestCase.LocalAction.\n+func (NATLoopbackSkipsPrerouting) LocalAction(ip net.IP) error {\n+ // No-op.\n+ return nil\n+}\n+\n+// loopbackTests runs an iptables rule and ensures that packets sent to\n+// dest:dropPort are received by localhost:acceptPort.\n+func loopbackTest(dest net.IP, args ...string) error {\n+ if err := natTable(args...); err != nil {\n+ return err\n+ }\n+ sendCh := make(chan error)\n+ listenCh := make(chan error)\n+ go func() {\n+ sendCh <- sendUDPLoop(dest, dropPort, sendloopDuration)\n+ }()\n+ go func() {\n+ listenCh <- listenUDP(acceptPort, sendloopDuration)\n+ }()\n+ select {\n+ case err := <-listenCh:\n+ if err != nil {\n+ return err\n+ }\n+ case <-time.After(sendloopDuration):\n+ return errors.New(\"timed out\")\n+ }\n+ // sendCh will always take the full sendloop time.\n+ return <-sendCh\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
iptables: loopback traffic skips prerouting chain Loopback traffic is not affected by rules in the PREROUTING chain. This change is also necessary for istio's envoy to talk to other components in the same pod.
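The dispatch decision reduces to a small predicate: run the IPv4 PREROUTING chain only for non-loopback NICs. The sketch below uses hypothetical names, not the netstack types.

```go
package main

import "fmt"

type nic struct{ loopback bool }

// runPrerouting reports whether inbound IPv4 traffic on this NIC should be
// passed through the PREROUTING chain: loopback traffic skips it, so a
// PREROUTING REDIRECT does not capture localhost (same-pod) connections.
func runPrerouting(n *nic, ipv4 bool) bool {
	return ipv4 && !n.loopback
}

func main() {
	fmt.Println(runPrerouting(&nic{loopback: true}, true))  // false
	fmt.Println(runPrerouting(&nic{loopback: false}, true)) // true
}
```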
259,858
05.06.2020 16:50:47
25,200
8d8dce418f7e4053f80b035ff257743b431859d9
Add stale issue & PR cleanup.
[ { "change_type": "ADD", "old_path": null, "new_path": ".github/workflows/stale.yml", "diff": "+name: \"Close stale issues\"\n+on:\n+ schedule:\n+ - cron: \"0 0 * * *\"\n+\n+jobs:\n+ stale:\n+ runs-on: ubuntu-latest\n+ steps:\n+ - uses: actions/stale@v3\n+ with:\n+ repo-token: ${{ secrets.GITHUB_TOKEN }}\n+ stale-issue-label: 'stale'\n+ stale-pr-label: 'stale'\n+ exempt-issue-labels: 'exported, type: bug, type: cleanup, type: enhancement, type: process, type: proposal, type: question'\n+ exempt-pr-labels: 'ready to pull'\n+ stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove the stale label or comment or this will be closed in 30 days.'\n+ stale-pr-message: 'This pull request is stale because it has been open 90 days with no activity. Remove the stale label or comment or this will be closed in 30 days.'\n+ days-before-stale: 90\n+ days-before-close: 30\n" } ]
Go
Apache License 2.0
google/gvisor
Add stale issue & PR cleanup. PiperOrigin-RevId: 315020368
259,858
05.06.2020 17:24:12
25,200
527d08f6afdea1e142c76b8abb2266525a98c2ea
Add +checkescape annotations to kvm/ring0. This analysis also catches a potential bug, which is a split on mapPhysical. This would have led to potential guest-exit during Mapping (although this would have been handled by the now-unnecessary retryInGuest loop).
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/BUILD", "new_path": "pkg/sentry/platform/kvm/BUILD", "diff": "@@ -6,8 +6,8 @@ go_library(\nname = \"kvm\",\nsrcs = [\n\"address_space.go\",\n- \"allocator.go\",\n\"bluepill.go\",\n+ \"bluepill_allocator.go\",\n\"bluepill_amd64.go\",\n\"bluepill_amd64.s\",\n\"bluepill_amd64_unsafe.go\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/address_space.go", "new_path": "pkg/sentry/platform/kvm/address_space.go", "diff": "@@ -26,16 +26,15 @@ import (\n// dirtySet tracks vCPUs for invalidation.\ntype dirtySet struct {\n- vCPUs []uint64\n+ vCPUMasks []uint64\n}\n// forEach iterates over all CPUs in the dirty set.\n+//\n+//go:nosplit\nfunc (ds *dirtySet) forEach(m *machine, fn func(c *vCPU)) {\n- m.mu.RLock()\n- defer m.mu.RUnlock()\n-\n- for index := range ds.vCPUs {\n- mask := atomic.SwapUint64(&ds.vCPUs[index], 0)\n+ for index := range ds.vCPUMasks {\n+ mask := atomic.SwapUint64(&ds.vCPUMasks[index], 0)\nif mask != 0 {\nfor bit := 0; bit < 64; bit++ {\nif mask&(1<<uint64(bit)) == 0 {\n@@ -54,7 +53,7 @@ func (ds *dirtySet) mark(c *vCPU) bool {\nindex := uint64(c.id) / 64\nbit := uint64(1) << uint(c.id%64)\n- oldValue := atomic.LoadUint64(&ds.vCPUs[index])\n+ oldValue := atomic.LoadUint64(&ds.vCPUMasks[index])\nif oldValue&bit != 0 {\nreturn false // Not clean.\n}\n@@ -62,7 +61,7 @@ func (ds *dirtySet) mark(c *vCPU) bool {\n// Set the bit unilaterally, and ensure that a flush takes place. Note\n// that it's possible for races to occur here, but since the flush is\n// taking place long after these lines there's no race in practice.\n- atomicbitops.OrUint64(&ds.vCPUs[index], bit)\n+ atomicbitops.OrUint64(&ds.vCPUMasks[index], bit)\nreturn true // Previously clean.\n}\n@@ -113,7 +112,12 @@ type hostMapEntry struct {\nlength uintptr\n}\n-func (as *addressSpace) mapHost(addr usermem.Addr, m hostMapEntry, at usermem.AccessType) (inv bool) {\n+// mapLocked maps the given host entry.\n+//\n+// +checkescape:hard,stack\n+//\n+//go:nosplit\n+func (as *addressSpace) mapLocked(addr usermem.Addr, m hostMapEntry, at usermem.AccessType) (inv bool) {\nfor m.length > 0 {\nphysical, length, ok := translateToPhysical(m.addr)\nif !ok {\n@@ -133,18 +137,10 @@ func (as *addressSpace) mapHost(addr usermem.Addr, m hostMapEntry, at usermem.Ac\n// important; if the pagetable mappings were installed before\n// ensuring the physical pages were available, then some other\n// thread could theoretically access them.\n- //\n- // Due to the way KVM's shadow paging implementation works,\n- // modifications to the page tables while in host mode may not\n- // be trapped, leading to the shadow pages being out of sync.\n- // Therefore, we need to ensure that we are in guest mode for\n- // page table modifications. 
See the call to bluepill, below.\n- as.machine.retryInGuest(func() {\ninv = as.pageTables.Map(addr, length, pagetables.MapOpts{\nAccessType: at,\nUser: true,\n}, physical) || inv\n- })\nm.addr += length\nm.length -= length\naddr += usermem.Addr(length)\n@@ -176,6 +172,10 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f platform.File, fr platform.\nreturn err\n}\n+ // See block in mapLocked.\n+ as.pageTables.Allocator.(*allocator).cpu = as.machine.Get()\n+ defer as.machine.Put(as.pageTables.Allocator.(*allocator).cpu)\n+\n// Map the mappings in the sentry's address space (guest physical memory)\n// into the application's address space (guest virtual memory).\ninv := false\n@@ -190,7 +190,12 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f platform.File, fr platform.\n_ = s[i] // Touch to commit.\n}\n}\n- prev := as.mapHost(addr, hostMapEntry{\n+\n+ // See bluepill_allocator.go.\n+ bluepill(as.pageTables.Allocator.(*allocator).cpu)\n+\n+ // Perform the mapping.\n+ prev := as.mapLocked(addr, hostMapEntry{\naddr: b.Addr(),\nlength: uintptr(b.Len()),\n}, at)\n@@ -204,17 +209,27 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f platform.File, fr platform.\nreturn nil\n}\n+// unmapLocked is an escape-checked wrapped around Unmap.\n+//\n+// +checkescape:hard,stack\n+//\n+//go:nosplit\n+func (as *addressSpace) unmapLocked(addr usermem.Addr, length uint64) bool {\n+ return as.pageTables.Unmap(addr, uintptr(length))\n+}\n+\n// Unmap unmaps the given range by calling pagetables.PageTables.Unmap.\nfunc (as *addressSpace) Unmap(addr usermem.Addr, length uint64) {\nas.mu.Lock()\ndefer as.mu.Unlock()\n- // See above re: retryInGuest.\n- var prev bool\n- as.machine.retryInGuest(func() {\n- prev = as.pageTables.Unmap(addr, uintptr(length)) || prev\n- })\n- if prev {\n+ // See above & bluepill_allocator.go.\n+ as.pageTables.Allocator.(*allocator).cpu = as.machine.Get()\n+ defer as.machine.Put(as.pageTables.Allocator.(*allocator).cpu)\n+ bluepill(as.pageTables.Allocator.(*allocator).cpu)\n+\n+ if prev := as.unmapLocked(addr, length); prev {\n+ // Invalidate all active vCPUs.\nas.invalidate()\n// Recycle any freed intermediate pages.\n@@ -227,7 +242,7 @@ func (as *addressSpace) Release() {\nas.Unmap(0, ^uint64(0))\n// Free all pages from the allocator.\n- as.pageTables.Allocator.(allocator).base.Drain()\n+ as.pageTables.Allocator.(*allocator).base.Drain()\n// Drop all cached machine references.\nas.machine.dropPageTables(as.pageTables)\n" }, { "change_type": "RENAME", "old_path": "pkg/sentry/platform/kvm/allocator.go", "new_path": "pkg/sentry/platform/kvm/bluepill_allocator.go", "diff": "@@ -21,56 +21,80 @@ import (\n)\ntype allocator struct {\n- base *pagetables.RuntimeAllocator\n+ base pagetables.RuntimeAllocator\n+\n+ // cpu must be set prior to any pagetable operation.\n+ //\n+ // Due to the way KVM's shadow paging implementation works,\n+ // modifications to the page tables while in host mode may not be\n+ // trapped, leading to the shadow pages being out of sync. Therefore,\n+ // we need to ensure that we are in guest mode for page table\n+ // modifications. 
See the call to bluepill, below.\n+ cpu *vCPU\n}\n// newAllocator is used to define the allocator.\n-func newAllocator() allocator {\n- return allocator{\n- base: pagetables.NewRuntimeAllocator(),\n- }\n+func newAllocator() *allocator {\n+ a := new(allocator)\n+ a.base.Init()\n+ return a\n}\n// NewPTEs implements pagetables.Allocator.NewPTEs.\n//\n+// +checkescape:all\n+//\n//go:nosplit\n-func (a allocator) NewPTEs() *pagetables.PTEs {\n- return a.base.NewPTEs()\n+func (a *allocator) NewPTEs() *pagetables.PTEs {\n+ ptes := a.base.NewPTEs() // escapes: bluepill below.\n+ if a.cpu != nil {\n+ bluepill(a.cpu)\n+ }\n+ return ptes\n}\n// PhysicalFor returns the physical address for a set of PTEs.\n//\n+// +checkescape:all\n+//\n//go:nosplit\n-func (a allocator) PhysicalFor(ptes *pagetables.PTEs) uintptr {\n+func (a *allocator) PhysicalFor(ptes *pagetables.PTEs) uintptr {\nvirtual := a.base.PhysicalFor(ptes)\nphysical, _, ok := translateToPhysical(virtual)\nif !ok {\n- panic(fmt.Sprintf(\"PhysicalFor failed for %p\", ptes))\n+ panic(fmt.Sprintf(\"PhysicalFor failed for %p\", ptes)) // escapes: panic.\n}\nreturn physical\n}\n// LookupPTEs implements pagetables.Allocator.LookupPTEs.\n//\n+// +checkescape:all\n+//\n//go:nosplit\n-func (a allocator) LookupPTEs(physical uintptr) *pagetables.PTEs {\n+func (a *allocator) LookupPTEs(physical uintptr) *pagetables.PTEs {\nvirtualStart, physicalStart, _, ok := calculateBluepillFault(physical, physicalRegions)\nif !ok {\n- panic(fmt.Sprintf(\"LookupPTEs failed for 0x%x\", physical))\n+ panic(fmt.Sprintf(\"LookupPTEs failed for 0x%x\", physical)) // escapes: panic.\n}\nreturn a.base.LookupPTEs(virtualStart + (physical - physicalStart))\n}\n// FreePTEs implements pagetables.Allocator.FreePTEs.\n//\n+// +checkescape:all\n+//\n//go:nosplit\n-func (a allocator) FreePTEs(ptes *pagetables.PTEs) {\n- a.base.FreePTEs(ptes)\n+func (a *allocator) FreePTEs(ptes *pagetables.PTEs) {\n+ a.base.FreePTEs(ptes) // escapes: bluepill below.\n+ if a.cpu != nil {\n+ bluepill(a.cpu)\n+ }\n}\n// Recycle implements pagetables.Allocator.Recycle.\n//\n//go:nosplit\n-func (a allocator) Recycle() {\n+func (a *allocator) Recycle() {\na.base.Recycle()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_amd64.go", "new_path": "pkg/sentry/platform/kvm/bluepill_amd64.go", "diff": "@@ -63,6 +63,8 @@ func bluepillArchEnter(context *arch.SignalContext64) *vCPU {\n// KernelSyscall handles kernel syscalls.\n//\n+// +checkescape:all\n+//\n//go:nosplit\nfunc (c *vCPU) KernelSyscall() {\nregs := c.Registers()\n@@ -72,13 +74,15 @@ func (c *vCPU) KernelSyscall() {\n// We only trigger a bluepill entry in the bluepill function, and can\n// therefore be guaranteed that there is no floating point state to be\n// loaded on resuming from halt. 
We only worry about saving on exit.\n- ring0.SaveFloatingPoint((*byte)(c.floatingPointState))\n+ ring0.SaveFloatingPoint((*byte)(c.floatingPointState)) // escapes: no.\nring0.Halt()\n- ring0.WriteFS(uintptr(regs.Fs_base)) // Reload host segment.\n+ ring0.WriteFS(uintptr(regs.Fs_base)) // escapes: no, reload host segment.\n}\n// KernelException handles kernel exceptions.\n//\n+// +checkescape:all\n+//\n//go:nosplit\nfunc (c *vCPU) KernelException(vector ring0.Vector) {\nregs := c.Registers()\n@@ -89,9 +93,9 @@ func (c *vCPU) KernelException(vector ring0.Vector) {\nregs.Rip = 0\n}\n// See above.\n- ring0.SaveFloatingPoint((*byte)(c.floatingPointState))\n+ ring0.SaveFloatingPoint((*byte)(c.floatingPointState)) // escapes: no.\nring0.Halt()\n- ring0.WriteFS(uintptr(regs.Fs_base)) // Reload host segment.\n+ ring0.WriteFS(uintptr(regs.Fs_base)) // escapes: no; reload host segment.\n}\n// bluepillArchExit is called during bluepillEnter.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_arm64.go", "new_path": "pkg/sentry/platform/kvm/bluepill_arm64.go", "diff": "@@ -66,6 +66,8 @@ func bluepillArchExit(c *vCPU, context *arch.SignalContext64) {\n// KernelSyscall handles kernel syscalls.\n//\n+// +checkescape:all\n+//\n//go:nosplit\nfunc (c *vCPU) KernelSyscall() {\nregs := c.Registers()\n@@ -88,6 +90,8 @@ func (c *vCPU) KernelSyscall() {\n// KernelException handles kernel exceptions.\n//\n+// +checkescape:all\n+//\n//go:nosplit\nfunc (c *vCPU) KernelException(vector ring0.Vector) {\nregs := c.Registers()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "new_path": "pkg/sentry/platform/kvm/bluepill_unsafe.go", "diff": "@@ -64,6 +64,8 @@ func bluepillArchContext(context unsafe.Pointer) *arch.SignalContext64 {\n// signal stack. It should only execute raw system calls and functions that are\n// explicitly marked go:nosplit.\n//\n+// +checkescape:all\n+//\n//go:nosplit\nfunc bluepillHandler(context unsafe.Pointer) {\n// Sanitize the registers; interrupts must always be disabled.\n@@ -82,7 +84,8 @@ func bluepillHandler(context unsafe.Pointer) {\n}\nfor {\n- switch _, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(c.fd), _KVM_RUN, 0); errno {\n+ _, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(c.fd), _KVM_RUN, 0) // escapes: no.\n+ switch errno {\ncase 0: // Expected case.\ncase syscall.EINTR:\n// First, we process whatever pending signal\n@@ -90,7 +93,7 @@ func bluepillHandler(context unsafe.Pointer) {\n// currently, all signals are masked and the signal\n// must have been delivered directly to this thread.\ntimeout := syscall.Timespec{}\n- sig, _, errno := syscall.RawSyscall6(\n+ sig, _, errno := syscall.RawSyscall6( // escapes: no.\nsyscall.SYS_RT_SIGTIMEDWAIT,\nuintptr(unsafe.Pointer(&bounceSignalMask)),\n0, // siginfo.\n@@ -125,7 +128,7 @@ func bluepillHandler(context unsafe.Pointer) {\n// MMIO exit we receive EFAULT from the run ioctl. 
We\n// always inject an NMI here since we may be in kernel\n// mode and have interrupts disabled.\n- if _, _, errno := syscall.RawSyscall(\n+ if _, _, errno := syscall.RawSyscall( // escapes: no.\nsyscall.SYS_IOCTL,\nuintptr(c.fd),\n_KVM_NMI, 0); errno != 0 {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine.go", "new_path": "pkg/sentry/platform/kvm/machine.go", "diff": "@@ -52,16 +52,19 @@ type machine struct {\n// available is notified when vCPUs are available.\navailable sync.Cond\n- // vCPUs are the machine vCPUs.\n+ // vCPUsByTID are the machine vCPUs.\n//\n// These are populated dynamically.\n- vCPUs map[uint64]*vCPU\n+ vCPUsByTID map[uint64]*vCPU\n// vCPUsByID are the machine vCPUs, can be indexed by the vCPU's ID.\n- vCPUsByID map[int]*vCPU\n+ vCPUsByID []*vCPU\n// maxVCPUs is the maximum number of vCPUs supported by the machine.\nmaxVCPUs int\n+\n+ // nextID is the next vCPU ID.\n+ nextID uint32\n}\nconst (\n@@ -137,9 +140,8 @@ type dieState struct {\n//\n// Precondition: mu must be held.\nfunc (m *machine) newVCPU() *vCPU {\n- id := len(m.vCPUs)\n-\n// Create the vCPU.\n+ id := int(atomic.AddUint32(&m.nextID, 1) - 1)\nfd, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(m.fd), _KVM_CREATE_VCPU, uintptr(id))\nif errno != 0 {\npanic(fmt.Sprintf(\"error creating new vCPU: %v\", errno))\n@@ -176,11 +178,7 @@ func (m *machine) newVCPU() *vCPU {\n// newMachine returns a new VM context.\nfunc newMachine(vm int) (*machine, error) {\n// Create the machine.\n- m := &machine{\n- fd: vm,\n- vCPUs: make(map[uint64]*vCPU),\n- vCPUsByID: make(map[int]*vCPU),\n- }\n+ m := &machine{fd: vm}\nm.available.L = &m.mu\nm.kernel.Init(ring0.KernelOpts{\nPageTables: pagetables.New(newAllocator()),\n@@ -194,6 +192,10 @@ func newMachine(vm int) (*machine, error) {\n}\nlog.Debugf(\"The maximum number of vCPUs is %d.\", m.maxVCPUs)\n+ // Create the vCPUs map/slices.\n+ m.vCPUsByTID = make(map[uint64]*vCPU)\n+ m.vCPUsByID = make([]*vCPU, m.maxVCPUs)\n+\n// Apply the physical mappings. Note that these mappings may point to\n// guest physical addresses that are not actually available. These\n// physical pages are mapped on demand, see kernel_unsafe.go.\n@@ -274,6 +276,8 @@ func newMachine(vm int) (*machine, error) {\n// not available. This attempts to be efficient for calls in the hot path.\n//\n// This panics on error.\n+//\n+//go:nosplit\nfunc (m *machine) mapPhysical(physical, length uintptr, phyRegions []physicalRegion, flags uint32) {\nfor end := physical + length; physical < end; {\n_, physicalStart, length, ok := calculateBluepillFault(physical, phyRegions)\n@@ -304,7 +308,11 @@ func (m *machine) Destroy() {\nruntime.SetFinalizer(m, nil)\n// Destroy vCPUs.\n- for _, c := range m.vCPUs {\n+ for _, c := range m.vCPUsByID {\n+ if c == nil {\n+ continue\n+ }\n+\n// Ensure the vCPU is not still running in guest mode. 
This is\n// possible iff teardown has been done by other threads, and\n// somehow a single thread has not executed any system calls.\n@@ -337,7 +345,7 @@ func (m *machine) Get() *vCPU {\ntid := procid.Current()\n// Check for an exact match.\n- if c := m.vCPUs[tid]; c != nil {\n+ if c := m.vCPUsByTID[tid]; c != nil {\nc.lock()\nm.mu.RUnlock()\nreturn c\n@@ -356,7 +364,7 @@ func (m *machine) Get() *vCPU {\ntid = procid.Current()\n// Recheck for an exact match.\n- if c := m.vCPUs[tid]; c != nil {\n+ if c := m.vCPUsByTID[tid]; c != nil {\nc.lock()\nm.mu.Unlock()\nreturn c\n@@ -364,10 +372,10 @@ func (m *machine) Get() *vCPU {\nfor {\n// Scan for an available vCPU.\n- for origTID, c := range m.vCPUs {\n+ for origTID, c := range m.vCPUsByTID {\nif atomic.CompareAndSwapUint32(&c.state, vCPUReady, vCPUUser) {\n- delete(m.vCPUs, origTID)\n- m.vCPUs[tid] = c\n+ delete(m.vCPUsByTID, origTID)\n+ m.vCPUsByTID[tid] = c\nm.mu.Unlock()\nc.loadSegments(tid)\nreturn c\n@@ -375,17 +383,17 @@ func (m *machine) Get() *vCPU {\n}\n// Create a new vCPU (maybe).\n- if len(m.vCPUs) < m.maxVCPUs {\n+ if int(m.nextID) < m.maxVCPUs {\nc := m.newVCPU()\nc.lock()\n- m.vCPUs[tid] = c\n+ m.vCPUsByTID[tid] = c\nm.mu.Unlock()\nc.loadSegments(tid)\nreturn c\n}\n// Scan for something not in user mode.\n- for origTID, c := range m.vCPUs {\n+ for origTID, c := range m.vCPUsByTID {\nif !atomic.CompareAndSwapUint32(&c.state, vCPUGuest, vCPUGuest|vCPUWaiter) {\ncontinue\n}\n@@ -403,8 +411,8 @@ func (m *machine) Get() *vCPU {\n}\n// Steal the vCPU.\n- delete(m.vCPUs, origTID)\n- m.vCPUs[tid] = c\n+ delete(m.vCPUsByTID, origTID)\n+ m.vCPUsByTID[tid] = c\nm.mu.Unlock()\nc.loadSegments(tid)\nreturn c\n@@ -431,7 +439,7 @@ func (m *machine) Put(c *vCPU) {\n// newDirtySet returns a new dirty set.\nfunc (m *machine) newDirtySet() *dirtySet {\nreturn &dirtySet{\n- vCPUs: make([]uint64, (m.maxVCPUs+63)/64, (m.maxVCPUs+63)/64),\n+ vCPUMasks: make([]uint64, (m.maxVCPUs+63)/64, (m.maxVCPUs+63)/64),\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_amd64.go", "new_path": "pkg/sentry/platform/kvm/machine_amd64.go", "diff": "@@ -51,9 +51,10 @@ func (m *machine) initArchState() error {\nrecover()\ndebug.SetPanicOnFault(old)\n}()\n- m.retryInGuest(func() {\n+ c := m.Get()\n+ defer m.Put(c)\n+ bluepill(c)\nring0.SetCPUIDFaulting(true)\n- })\nreturn nil\n}\n@@ -89,8 +90,8 @@ func (m *machine) dropPageTables(pt *pagetables.PageTables) {\ndefer m.mu.Unlock()\n// Clear from all PCIDs.\n- for _, c := range m.vCPUs {\n- if c.PCIDs != nil {\n+ for _, c := range m.vCPUsByID {\n+ if c != nil && c.PCIDs != nil {\nc.PCIDs.Drop(pt)\n}\n}\n@@ -335,29 +336,6 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)\n}\n}\n-// retryInGuest runs the given function in guest mode.\n-//\n-// If the function does not complete in guest mode (due to execution of a\n-// system call due to a GC stall, for example), then it will be retried. The\n-// given function must be idempotent as a result of the retry mechanism.\n-func (m *machine) retryInGuest(fn func()) {\n- c := m.Get()\n- defer m.Put(c)\n- for {\n- c.ClearErrorCode() // See below.\n- bluepill(c) // Force guest mode.\n- fn() // Execute the given function.\n- _, user := c.ErrorCode()\n- if user {\n- // If user is set, then we haven't bailed back to host\n- // mode via a kernel exception or system call. 
We\n- // consider the full function to have executed in guest\n- // mode and we can return.\n- break\n- }\n- }\n-}\n-\n// On x86 platform, the flags for \"setMemoryRegion\" can always be set as 0.\n// There is no need to return read-only physicalRegions.\nfunc rdonlyRegionsForSetMem() (phyRegions []physicalRegion) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_amd64_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_amd64_unsafe.go", "diff": "@@ -154,7 +154,7 @@ func (c *vCPU) setUserRegisters(uregs *userRegs) error {\n//\n//go:nosplit\nfunc (c *vCPU) getUserRegisters(uregs *userRegs) syscall.Errno {\n- if _, _, errno := syscall.RawSyscall(\n+ if _, _, errno := syscall.RawSyscall( // escapes: no.\nsyscall.SYS_IOCTL,\nuintptr(c.fd),\n_KVM_GET_REGS,\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "new_path": "pkg/sentry/platform/kvm/machine_unsafe.go", "diff": "@@ -115,7 +115,7 @@ func (a *atomicAddressSpace) get() *addressSpace {\n//\n//go:nosplit\nfunc (c *vCPU) notify() {\n- _, _, errno := syscall.RawSyscall6(\n+ _, _, errno := syscall.RawSyscall6( // escapes: no.\nsyscall.SYS_FUTEX,\nuintptr(unsafe.Pointer(&c.state)),\nlinux.FUTEX_WAKE|linux.FUTEX_PRIVATE_FLAG,\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/kernel.go", "new_path": "pkg/sentry/platform/ring0/kernel.go", "diff": "@@ -31,23 +31,39 @@ type defaultHooks struct{}\n// KernelSyscall implements Hooks.KernelSyscall.\n//\n+// +checkescape:all\n+//\n//go:nosplit\n-func (defaultHooks) KernelSyscall() { Halt() }\n+func (defaultHooks) KernelSyscall() {\n+ Halt()\n+}\n// KernelException implements Hooks.KernelException.\n//\n+// +checkescape:all\n+//\n//go:nosplit\n-func (defaultHooks) KernelException(Vector) { Halt() }\n+func (defaultHooks) KernelException(Vector) {\n+ Halt()\n+}\n// kernelSyscall is a trampoline.\n//\n+// +checkescape:hard,stack\n+//\n//go:nosplit\n-func kernelSyscall(c *CPU) { c.hooks.KernelSyscall() }\n+func kernelSyscall(c *CPU) {\n+ c.hooks.KernelSyscall()\n+}\n// kernelException is a trampoline.\n//\n+// +checkescape:hard,stack\n+//\n//go:nosplit\n-func kernelException(c *CPU, vector Vector) { c.hooks.KernelException(vector) }\n+func kernelException(c *CPU, vector Vector) {\n+ c.hooks.KernelException(vector)\n+}\n// Init initializes a new CPU.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/kernel_amd64.go", "new_path": "pkg/sentry/platform/ring0/kernel_amd64.go", "diff": "@@ -178,6 +178,8 @@ func IsCanonical(addr uint64) bool {\n//\n// Precondition: the Rip, Rsp, Fs and Gs registers must be canonical.\n//\n+// +checkescape:all\n+//\n//go:nosplit\nfunc (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\nuserCR3 := switchOpts.PageTables.CR3(!switchOpts.Flush, switchOpts.UserPCID)\n@@ -192,9 +194,9 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\n// Perform the switch.\nswapgs() // GS will be swapped on return.\n- WriteFS(uintptr(regs.Fs_base)) // Set application FS.\n- WriteGS(uintptr(regs.Gs_base)) // Set application GS.\n- LoadFloatingPoint(switchOpts.FloatingPointState) // Copy in floating point.\n+ WriteFS(uintptr(regs.Fs_base)) // escapes: no. Set application FS.\n+ WriteGS(uintptr(regs.Gs_base)) // escapes: no. Set application GS.\n+ LoadFloatingPoint(switchOpts.FloatingPointState) // escapes: no. 
Copy in floating point.\njumpToKernel() // Switch to upper half.\nwriteCR3(uintptr(userCR3)) // Change to user address space.\nif switchOpts.FullRestore {\n@@ -204,8 +206,8 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {\n}\nwriteCR3(uintptr(kernelCR3)) // Return to kernel address space.\njumpToUser() // Return to lower half.\n- SaveFloatingPoint(switchOpts.FloatingPointState) // Copy out floating point.\n- WriteFS(uintptr(c.registers.Fs_base)) // Restore kernel FS.\n+ SaveFloatingPoint(switchOpts.FloatingPointState) // escapes: no. Copy out floating point.\n+ WriteFS(uintptr(c.registers.Fs_base)) // escapes: no. Restore kernel FS.\nreturn\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/pagetables/allocator.go", "new_path": "pkg/sentry/platform/ring0/pagetables/allocator.go", "diff": "@@ -53,9 +53,14 @@ type RuntimeAllocator struct {\n// NewRuntimeAllocator returns an allocator that uses runtime allocation.\nfunc NewRuntimeAllocator() *RuntimeAllocator {\n- return &RuntimeAllocator{\n- used: make(map[*PTEs]struct{}),\n+ r := new(RuntimeAllocator)\n+ r.Init()\n+ return r\n}\n+\n+// Init initializes a RuntimeAllocator.\n+func (r *RuntimeAllocator) Init() {\n+ r.used = make(map[*PTEs]struct{})\n}\n// Recycle returns freed pages to the pool.\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/platform/ring0/pagetables/pagetables.go", "new_path": "pkg/sentry/platform/ring0/pagetables/pagetables.go", "diff": "@@ -86,6 +86,8 @@ func (*mapVisitor) requiresSplit() bool { return true }\n//\n// Precondition: addr & length must be page-aligned, their sum must not overflow.\n//\n+// +checkescape:hard,stack\n+//\n//go:nosplit\nfunc (p *PageTables) Map(addr usermem.Addr, length uintptr, opts MapOpts, physical uintptr) bool {\nif !opts.AccessType.Any() {\n@@ -128,6 +130,8 @@ func (v *unmapVisitor) visit(start uintptr, pte *PTE, align uintptr) {\n//\n// Precondition: addr & length must be page-aligned.\n//\n+// +checkescape:hard,stack\n+//\n//go:nosplit\nfunc (p *PageTables) Unmap(addr usermem.Addr, length uintptr) bool {\nw := unmapWalker{\n@@ -162,6 +166,8 @@ func (v *emptyVisitor) visit(start uintptr, pte *PTE, align uintptr) {\n//\n// Precondition: addr & length must be page-aligned.\n//\n+// +checkescape:hard,stack\n+//\n//go:nosplit\nfunc (p *PageTables) IsEmpty(addr usermem.Addr, length uintptr) bool {\nw := emptyWalker{\n@@ -197,6 +203,8 @@ func (*lookupVisitor) requiresSplit() bool { return false }\n// Lookup returns the physical address for the given virtual address.\n//\n+// +checkescape:hard,stack\n+//\n//go:nosplit\nfunc (p *PageTables) Lookup(addr usermem.Addr) (physical uintptr, opts MapOpts) {\nmask := uintptr(usermem.PageSize - 1)\n" } ]
Go
Apache License 2.0
google/gvisor
Add +checkescape annotations to kvm/ring0. This analysis also catches a potential bug, which is a split on mapPhysical. This would have led to potential guest-exit during Mapping (although this would have been handled by the now-unnecessary retryInGuest loop). PiperOrigin-RevId: 315025106
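The +checkescape directives that appear throughout this diff instruct gVisor's escape-analysis checker to verify that the annotated (typically go:nosplit) functions do not let values escape to the heap, since an unexpected allocation or stack split while in guest mode would be unsafe. A minimal sketch of how such an annotation is attached — the function name and body here are invented for illustration, not taken from the commit — might look like:

```go
package example

// bumpCounter is a hypothetical example. The +checkescape:all directive asks
// the escape-analysis tool to confirm that nothing in this function escapes
// to the heap, which matters for go:nosplit code that may run in contexts
// where a heap allocation or stack split would be unsafe.
//
// +checkescape:all
//
//go:nosplit
func bumpCounter(v uint64) uint64 {
	return v + 1 // escapes: no.
}
```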
259,885
08.06.2020 13:27:06
25,200
dc029b4b96e92719b2850e9d5556f68837737997
Implement VFS2 tmpfs mount options. As in VFS1, the mode, uid, and gid options are supported. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/device_file.go", "new_path": "pkg/sentry/fsimpl/tmpfs/device_file.go", "diff": "@@ -29,7 +29,7 @@ type deviceFile struct {\nminor uint32\n}\n-func (fs *filesystem) newDeviceFile(creds *auth.Credentials, mode linux.FileMode, kind vfs.DeviceKind, major, minor uint32) *inode {\n+func (fs *filesystem) newDeviceFile(kuid auth.KUID, kgid auth.KGID, mode linux.FileMode, kind vfs.DeviceKind, major, minor uint32) *inode {\nfile := &deviceFile{\nkind: kind,\nmajor: major,\n@@ -43,7 +43,7 @@ func (fs *filesystem) newDeviceFile(creds *auth.Credentials, mode linux.FileMode\ndefault:\npanic(fmt.Sprintf(\"invalid DeviceKind: %v\", kind))\n}\n- file.inode.init(file, fs, creds, mode)\n+ file.inode.init(file, fs, kuid, kgid, mode)\nfile.inode.nlink = 1 // from parent directory\nreturn &file.inode\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/directory.go", "new_path": "pkg/sentry/fsimpl/tmpfs/directory.go", "diff": "@@ -48,9 +48,9 @@ type directory struct {\nchildList dentryList\n}\n-func (fs *filesystem) newDirectory(creds *auth.Credentials, mode linux.FileMode) *directory {\n+func (fs *filesystem) newDirectory(kuid auth.KUID, kgid auth.KGID, mode linux.FileMode) *directory {\ndir := &directory{}\n- dir.inode.init(dir, fs, creds, linux.S_IFDIR|mode)\n+ dir.inode.init(dir, fs, kuid, kgid, linux.S_IFDIR|mode)\ndir.inode.nlink = 2 // from \".\" and parent directory or \"..\" for root\ndir.dentry.inode = &dir.inode\ndir.dentry.vfsd.Init(&dir.dentry)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -256,11 +256,12 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\n// MkdirAt implements vfs.FilesystemImpl.MkdirAt.\nfunc (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {\nreturn fs.doCreateAt(rp, true /* dir */, func(parentDir *directory, name string) error {\n+ creds := rp.Credentials()\nif parentDir.inode.nlink == maxLinks {\nreturn syserror.EMLINK\n}\nparentDir.inode.incLinksLocked() // from child's \"..\"\n- childDir := fs.newDirectory(rp.Credentials(), opts.Mode)\n+ childDir := fs.newDirectory(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode)\nparentDir.insertChildLocked(&childDir.dentry, name)\nreturn nil\n})\n@@ -269,18 +270,19 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v\n// MknodAt implements vfs.FilesystemImpl.MknodAt.\nfunc (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {\nreturn fs.doCreateAt(rp, false /* dir */, func(parentDir *directory, name string) error {\n+ creds := rp.Credentials()\nvar childInode *inode\nswitch opts.Mode.FileType() {\ncase 0, linux.S_IFREG:\n- childInode = fs.newRegularFile(rp.Credentials(), opts.Mode)\n+ childInode = fs.newRegularFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode)\ncase linux.S_IFIFO:\n- childInode = fs.newNamedPipe(rp.Credentials(), opts.Mode)\n+ childInode = fs.newNamedPipe(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode)\ncase linux.S_IFBLK:\n- childInode = fs.newDeviceFile(rp.Credentials(), opts.Mode, vfs.BlockDevice, opts.DevMajor, opts.DevMinor)\n+ childInode = fs.newDeviceFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode, vfs.BlockDevice, opts.DevMajor, opts.DevMinor)\ncase linux.S_IFCHR:\n- childInode = fs.newDeviceFile(rp.Credentials(), opts.Mode, vfs.CharDevice, 
opts.DevMajor, opts.DevMinor)\n+ childInode = fs.newDeviceFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode, vfs.CharDevice, opts.DevMajor, opts.DevMinor)\ncase linux.S_IFSOCK:\n- childInode = fs.newSocketFile(rp.Credentials(), opts.Mode, opts.Endpoint)\n+ childInode = fs.newSocketFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode, opts.Endpoint)\ndefault:\nreturn syserror.EINVAL\n}\n@@ -355,7 +357,8 @@ afterTrailingSymlink:\n}\ndefer rp.Mount().EndWrite()\n// Create and open the child.\n- child := fs.newDentry(fs.newRegularFile(rp.Credentials(), opts.Mode))\n+ creds := rp.Credentials()\n+ child := fs.newDentry(fs.newRegularFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode))\nparentDir.insertChildLocked(child, name)\nfd, err := child.open(ctx, rp, &opts, true)\nif err != nil {\n@@ -676,7 +679,8 @@ func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linu\n// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.\nfunc (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {\nreturn fs.doCreateAt(rp, false /* dir */, func(parentDir *directory, name string) error {\n- child := fs.newDentry(fs.newSymlink(rp.Credentials(), target))\n+ creds := rp.Credentials()\n+ child := fs.newDentry(fs.newSymlink(creds.EffectiveKUID, creds.EffectiveKGID, 0777, target))\nparentDir.insertChildLocked(child, name)\nreturn nil\n})\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/named_pipe.go", "new_path": "pkg/sentry/fsimpl/tmpfs/named_pipe.go", "diff": "@@ -30,9 +30,9 @@ type namedPipe struct {\n// Preconditions:\n// * fs.mu must be locked.\n// * rp.Mount().CheckBeginWrite() has been called successfully.\n-func (fs *filesystem) newNamedPipe(creds *auth.Credentials, mode linux.FileMode) *inode {\n+func (fs *filesystem) newNamedPipe(kuid auth.KUID, kgid auth.KGID, mode linux.FileMode) *inode {\nfile := &namedPipe{pipe: pipe.NewVFSPipe(true /* isNamed */, pipe.DefaultPipeSize, usermem.PageSize)}\n- file.inode.init(file, fs, creds, linux.S_IFIFO|mode)\n+ file.inode.init(file, fs, kuid, kgid, linux.S_IFIFO|mode)\nfile.inode.nlink = 1 // Only the parent has a link.\nreturn &file.inode\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go", "new_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go", "diff": "@@ -85,12 +85,12 @@ type regularFile struct {\nsize uint64\n}\n-func (fs *filesystem) newRegularFile(creds *auth.Credentials, mode linux.FileMode) *inode {\n+func (fs *filesystem) newRegularFile(kuid auth.KUID, kgid auth.KGID, mode linux.FileMode) *inode {\nfile := &regularFile{\nmemFile: fs.memFile,\nseals: linux.F_SEAL_SEAL,\n}\n- file.inode.init(file, fs, creds, linux.S_IFREG|mode)\n+ file.inode.init(file, fs, kuid, kgid, linux.S_IFREG|mode)\nfile.inode.nlink = 1 // from parent directory\nreturn &file.inode\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/socket_file.go", "new_path": "pkg/sentry/fsimpl/tmpfs/socket_file.go", "diff": "@@ -26,9 +26,9 @@ type socketFile struct {\nep transport.BoundEndpoint\n}\n-func (fs *filesystem) newSocketFile(creds *auth.Credentials, mode linux.FileMode, ep transport.BoundEndpoint) *inode {\n+func (fs *filesystem) newSocketFile(kuid auth.KUID, kgid auth.KGID, mode linux.FileMode, ep transport.BoundEndpoint) *inode {\nfile := &socketFile{ep: ep}\n- file.inode.init(file, fs, creds, mode)\n+ file.inode.init(file, fs, kuid, kgid, mode)\nfile.inode.nlink = 1 // from parent directory\nreturn &file.inode\n}\n" }, { "change_type": 
"MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/symlink.go", "new_path": "pkg/sentry/fsimpl/tmpfs/symlink.go", "diff": "@@ -24,11 +24,11 @@ type symlink struct {\ntarget string // immutable\n}\n-func (fs *filesystem) newSymlink(creds *auth.Credentials, target string) *inode {\n+func (fs *filesystem) newSymlink(kuid auth.KUID, kgid auth.KGID, mode linux.FileMode, target string) *inode {\nlink := &symlink{\ntarget: target,\n}\n- link.inode.init(link, fs, creds, linux.S_IFLNK|0777)\n+ link.inode.init(link, fs, kuid, kgid, linux.S_IFLNK|mode)\nlink.inode.nlink = 1 // from parent directory\nreturn &link.inode\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -113,57 +113,78 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt\n}\n}\n- devMinor, err := vfsObj.GetAnonBlockDevMinor()\n- if err != nil {\n- return nil, nil, err\n- }\n- clock := time.RealtimeClockFromContext(ctx)\n- fs := filesystem{\n- memFile: memFileProvider.MemoryFile(),\n- clock: clock,\n- devMinor: devMinor,\n- }\n- fs.vfsfs.Init(vfsObj, newFSType, &fs)\n-\nmopts := vfs.GenericParseMountOptions(opts.Data)\n-\n- defaultMode := linux.FileMode(0777)\n- if modeStr, ok := mopts[\"mode\"]; ok {\n+ rootMode := linux.FileMode(0777)\n+ if rootFileType == linux.S_IFDIR {\n+ rootMode = 01777\n+ }\n+ modeStr, ok := mopts[\"mode\"]\n+ if ok {\n+ delete(mopts, \"mode\")\nmode, err := strconv.ParseUint(modeStr, 8, 32)\nif err != nil {\n- return nil, nil, fmt.Errorf(\"Mount option \\\"mode='%v'\\\" not parsable: %v\", modeStr, err)\n+ ctx.Warningf(\"tmpfs.FilesystemType.GetFilesystem: invalid mode: %q\", modeStr)\n+ return nil, nil, syserror.EINVAL\n}\n- defaultMode = linux.FileMode(mode)\n+ rootMode = linux.FileMode(mode & 07777)\n}\n-\n- defaultOwnerCreds := creds.Fork()\n- if uidStr, ok := mopts[\"uid\"]; ok {\n- uid, err := strconv.ParseInt(uidStr, 10, 32)\n+ rootKUID := creds.EffectiveKUID\n+ uidStr, ok := mopts[\"uid\"]\n+ if ok {\n+ delete(mopts, \"uid\")\n+ uid, err := strconv.ParseUint(uidStr, 10, 32)\nif err != nil {\n- return nil, nil, fmt.Errorf(\"Mount option \\\"uid='%v'\\\" not parsable: %v\", uidStr, err)\n+ ctx.Warningf(\"tmpfs.FilesystemType.GetFilesystem: invalid uid: %q\", uidStr)\n+ return nil, nil, syserror.EINVAL\n}\n- if err := defaultOwnerCreds.SetUID(auth.UID(uid)); err != nil {\n- return nil, nil, fmt.Errorf(\"Error using mount option \\\"uid='%v'\\\": %v\", uidStr, err)\n+ kuid := creds.UserNamespace.MapToKUID(auth.UID(uid))\n+ if !kuid.Ok() {\n+ ctx.Warningf(\"tmpfs.FilesystemType.GetFilesystem: unmapped uid: %d\", uid)\n+ return nil, nil, syserror.EINVAL\n}\n+ rootKUID = kuid\n}\n- if gidStr, ok := mopts[\"gid\"]; ok {\n- gid, err := strconv.ParseInt(gidStr, 10, 32)\n+ rootKGID := creds.EffectiveKGID\n+ gidStr, ok := mopts[\"gid\"]\n+ if ok {\n+ delete(mopts, \"gid\")\n+ gid, err := strconv.ParseUint(gidStr, 10, 32)\nif err != nil {\n- return nil, nil, fmt.Errorf(\"Mount option \\\"gid='%v'\\\" not parsable: %v\", gidStr, err)\n+ ctx.Warningf(\"tmpfs.FilesystemType.GetFilesystem: invalid gid: %q\", gidStr)\n+ return nil, nil, syserror.EINVAL\n+ }\n+ kgid := creds.UserNamespace.MapToKGID(auth.GID(gid))\n+ if !kgid.Ok() {\n+ ctx.Warningf(\"tmpfs.FilesystemType.GetFilesystem: unmapped gid: %d\", gid)\n+ return nil, nil, syserror.EINVAL\n+ }\n+ rootKGID = kgid\n}\n- if err := defaultOwnerCreds.SetGID(auth.GID(gid)); err != nil {\n- return nil, nil, fmt.Errorf(\"Error using mount option 
\\\"gid='%v'\\\": %v\", gidStr, err)\n+ if len(mopts) != 0 {\n+ ctx.Warningf(\"tmpfs.FilesystemType.GetFilesystem: unknown options: %v\", mopts)\n+ return nil, nil, syserror.EINVAL\n}\n+\n+ devMinor, err := vfsObj.GetAnonBlockDevMinor()\n+ if err != nil {\n+ return nil, nil, err\n}\n+ clock := time.RealtimeClockFromContext(ctx)\n+ fs := filesystem{\n+ memFile: memFileProvider.MemoryFile(),\n+ clock: clock,\n+ devMinor: devMinor,\n+ }\n+ fs.vfsfs.Init(vfsObj, newFSType, &fs)\nvar root *dentry\nswitch rootFileType {\ncase linux.S_IFREG:\n- root = fs.newDentry(fs.newRegularFile(defaultOwnerCreds, defaultMode))\n+ root = fs.newDentry(fs.newRegularFile(rootKUID, rootKGID, rootMode))\ncase linux.S_IFLNK:\n- root = fs.newDentry(fs.newSymlink(defaultOwnerCreds, tmpfsOpts.RootSymlinkTarget))\n+ root = fs.newDentry(fs.newSymlink(rootKUID, rootKGID, rootMode, tmpfsOpts.RootSymlinkTarget))\ncase linux.S_IFDIR:\n- root = &fs.newDirectory(defaultOwnerCreds, defaultMode).dentry\n+ root = &fs.newDirectory(rootKUID, rootKGID, rootMode).dentry\ndefault:\nfs.vfsfs.DecRef()\nreturn nil, nil, fmt.Errorf(\"invalid tmpfs root file type: %#o\", rootFileType)\n@@ -301,15 +322,15 @@ type inode struct {\nconst maxLinks = math.MaxUint32\n-func (i *inode) init(impl interface{}, fs *filesystem, creds *auth.Credentials, mode linux.FileMode) {\n+func (i *inode) init(impl interface{}, fs *filesystem, kuid auth.KUID, kgid auth.KGID, mode linux.FileMode) {\nif mode.FileType() == 0 {\npanic(\"file type is required in FileMode\")\n}\ni.fs = fs\ni.refs = 1\ni.mode = uint32(mode)\n- i.uid = uint32(creds.EffectiveKUID)\n- i.gid = uint32(creds.EffectiveKGID)\n+ i.uid = uint32(kuid)\n+ i.gid = uint32(kgid)\ni.ino = atomic.AddUint64(&fs.nextInoMinusOne, 1)\n// Tmpfs creation sets atime, ctime, and mtime to current time.\nnow := fs.clock.Now().Nanoseconds()\n@@ -766,8 +787,7 @@ func NewMemfd(mount *vfs.Mount, creds *auth.Credentials, allowSeals bool, name s\n// Per Linux, mm/shmem.c:__shmem_file_setup(), memfd inodes are set up with\n// S_IRWXUGO.\n- mode := linux.FileMode(0777)\n- inode := fs.newRegularFile(creds, mode)\n+ inode := fs.newRegularFile(creds.EffectiveKUID, creds.EffectiveKGID, 0777)\nrf := inode.impl.(*regularFile)\nif allowSeals {\nrf.seals = 0\n" } ]
Go
Apache License 2.0
google/gvisor
Implement VFS2 tmpfs mount options. As in VFS1, the mode, uid, and gid options are supported. Updates #1197 PiperOrigin-RevId: 315340510
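The mode, uid, and gid tmpfs mount options handled by this commit arrive as a comma-separated data string; per the diff, mode is parsed as octal while uid and gid are parsed as decimal and then mapped into the mounting user namespace. A simplified sketch of that parsing, with invented names and the namespace mapping omitted, could look like the following:

```go
package example

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTmpfsOpts is a simplified, hypothetical stand-in for the option
// handling added by the commit above: mode is parsed as octal, uid and gid
// as decimal. The real code also maps uid/gid into the user namespace and
// rejects unknown options with EINVAL.
func parseTmpfsOpts(data string) (mode, uid, gid uint64, err error) {
	mode = 0777 // default root mode before any "mode=" option
	for _, kv := range strings.Split(data, ",") {
		if kv == "" {
			continue
		}
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) != 2 {
			return 0, 0, 0, fmt.Errorf("malformed option %q", kv)
		}
		switch parts[0] {
		case "mode":
			mode, err = strconv.ParseUint(parts[1], 8, 32)
		case "uid":
			uid, err = strconv.ParseUint(parts[1], 10, 32)
		case "gid":
			gid, err = strconv.ParseUint(parts[1], 10, 32)
		default:
			err = fmt.Errorf("unknown option %q", parts[0])
		}
		if err != nil {
			return 0, 0, 0, err
		}
	}
	return mode & 07777, uid, gid, nil
}
```

For example, parseTmpfsOpts("mode=0755,uid=1000,gid=1000") would yield an octal mode of 0755 with the given owner ids.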
259,992
08.06.2020 23:06:50
25,200
4e96b94915633cc06bf04bd680f4eeba6a764dc9
Combine executable lookup code. Run vs. exec and VFS1 vs. VFS2 executable lookups were slightly different from each other. Combine them all into the same logic.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/control/BUILD", "new_path": "pkg/sentry/control/BUILD", "diff": "@@ -16,15 +16,12 @@ go_library(\n],\ndeps = [\n\"//pkg/abi/linux\",\n- \"//pkg/context\",\n\"//pkg/fd\",\n- \"//pkg/fspath\",\n\"//pkg/log\",\n\"//pkg/sentry/fdimport\",\n\"//pkg/sentry/fs\",\n\"//pkg/sentry/fs/host\",\n\"//pkg/sentry/fs/user\",\n- \"//pkg/sentry/fsbridge\",\n\"//pkg/sentry/fsimpl/host\",\n\"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n@@ -36,7 +33,6 @@ go_library(\n\"//pkg/sentry/vfs\",\n\"//pkg/sentry/watchdog\",\n\"//pkg/sync\",\n- \"//pkg/syserror\",\n\"//pkg/tcpip/link/sniffer\",\n\"//pkg/urpc\",\n\"@org_golang_x_sys//unix:go_default_library\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/control/proc.go", "new_path": "pkg/sentry/control/proc.go", "diff": "@@ -25,13 +25,10 @@ import (\n\"golang.org/x/sys/unix\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n- \"gvisor.dev/gvisor/pkg/context\"\n- \"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/sentry/fdimport\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/host\"\n\"gvisor.dev/gvisor/pkg/sentry/fs/user\"\n- \"gvisor.dev/gvisor/pkg/sentry/fsbridge\"\nhostvfs2 \"gvisor.dev/gvisor/pkg/sentry/fsimpl/host\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n@@ -39,7 +36,6 @@ import (\n\"gvisor.dev/gvisor/pkg/sentry/limits\"\n\"gvisor.dev/gvisor/pkg/sentry/usage\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n- \"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/urpc\"\n)\n@@ -107,6 +103,9 @@ type ExecArgs struct {\n// String prints the arguments as a string.\nfunc (args ExecArgs) String() string {\n+ if len(args.Argv) == 0 {\n+ return args.Filename\n+ }\na := make([]string, len(args.Argv))\ncopy(a, args.Argv)\nif args.Filename != \"\" {\n@@ -179,7 +178,6 @@ func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, kernel.ThreadI\n}\nctx := initArgs.NewContext(proc.Kernel)\n- if initArgs.Filename == \"\" {\nif kernel.VFS2Enabled {\n// Get the full path to the filename from the PATH env variable.\nif initArgs.MountNamespaceVFS2 == nil {\n@@ -189,11 +187,6 @@ func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, kernel.ThreadI\n// transferred to the new process.\ninitArgs.MountNamespaceVFS2 = proc.Kernel.GlobalInit().Leader().MountNamespaceVFS2()\n}\n- file, err := getExecutableFD(ctx, creds, proc.Kernel.VFS(), initArgs.MountNamespaceVFS2, initArgs.Envv, initArgs.WorkingDirectory, initArgs.Argv[0])\n- if err != nil {\n- return nil, 0, nil, nil, fmt.Errorf(\"error finding executable %q in environment %v: %v\", initArgs.Argv[0], initArgs.Envv, err)\n- }\n- initArgs.File = fsbridge.NewVFSFile(file)\n} else {\nif initArgs.MountNamespace == nil {\n// Set initArgs so that 'ctx' returns the namespace.\n@@ -203,13 +196,12 @@ func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, kernel.ThreadI\n// be donated to the new process in CreateProcess.\ninitArgs.MountNamespace.IncRef()\n}\n- f, err := user.ResolveExecutablePath(ctx, creds, initArgs.MountNamespace, initArgs.Envv, initArgs.WorkingDirectory, initArgs.Argv[0])\n- if err != nil {\n- return nil, 0, nil, nil, fmt.Errorf(\"error finding executable %q in PATH %v: %v\", initArgs.Argv[0], initArgs.Envv, err)\n- }\n- initArgs.Filename = f\n}\n+ resolved, err := user.ResolveExecutablePath(ctx, &initArgs)\n+ if err != nil {\n+ return nil, 0, nil, nil, err\n}\n+ initArgs.Filename = resolved\nfds := make([]int, len(args.FilePayload.Files))\nfor i, file := 
range args.FilePayload.Files {\n@@ -422,31 +414,3 @@ func ttyName(tty *kernel.TTY) string {\n}\nreturn fmt.Sprintf(\"pts/%d\", tty.Index)\n}\n-\n-// getExecutableFD resolves the given executable name and returns a\n-// vfs.FileDescription for the executable file.\n-func getExecutableFD(ctx context.Context, creds *auth.Credentials, vfsObj *vfs.VirtualFilesystem, mns *vfs.MountNamespace, envv []string, wd, name string) (*vfs.FileDescription, error) {\n- path, err := user.ResolveExecutablePathVFS2(ctx, creds, mns, envv, wd, name)\n- if err != nil {\n- return nil, err\n- }\n-\n- root := vfs.RootFromContext(ctx)\n- defer root.DecRef()\n-\n- pop := vfs.PathOperation{\n- Root: root,\n- Start: root, // binPath is absolute, Start can be anything.\n- Path: fspath.Parse(path),\n- FollowFinalSymlink: true,\n- }\n- opts := &vfs.OpenOptions{\n- Flags: linux.O_RDONLY,\n- FileExec: true,\n- }\n- f, err := vfsObj.OpenAt(ctx, creds, &pop, opts)\n- if err == syserror.ENOENT || err == syserror.EACCES {\n- return nil, nil\n- }\n- return f, err\n-}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/user/BUILD", "new_path": "pkg/sentry/fs/user/BUILD", "diff": "@@ -15,6 +15,7 @@ go_library(\n\"//pkg/fspath\",\n\"//pkg/log\",\n\"//pkg/sentry/fs\",\n+ \"//pkg/sentry/kernel\",\n\"//pkg/sentry/kernel/auth\",\n\"//pkg/sentry/vfs\",\n\"//pkg/syserror\",\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/user/path.go", "new_path": "pkg/sentry/fs/user/path.go", "diff": "@@ -24,6 +24,7 @@ import (\n\"gvisor.dev/gvisor/pkg/fspath\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/fs\"\n+ \"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel/auth\"\n\"gvisor.dev/gvisor/pkg/sentry/vfs\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -31,7 +32,15 @@ import (\n// ResolveExecutablePath resolves the given executable name given the working\n// dir and environment.\n-func ResolveExecutablePath(ctx context.Context, creds *auth.Credentials, mns *fs.MountNamespace, envv []string, wd, name string) (string, error) {\n+func ResolveExecutablePath(ctx context.Context, args *kernel.CreateProcessArgs) (string, error) {\n+ name := args.Filename\n+ if len(name) == 0 {\n+ if len(args.Argv) == 0 {\n+ return \"\", fmt.Errorf(\"no filename or command provided\")\n+ }\n+ name = args.Argv[0]\n+ }\n+\n// Absolute paths can be used directly.\nif path.IsAbs(name) {\nreturn name, nil\n@@ -40,6 +49,7 @@ func ResolveExecutablePath(ctx context.Context, creds *auth.Credentials, mns *fs\n// Paths with '/' in them should be joined to the working directory, or\n// to the root if working directory is not set.\nif strings.IndexByte(name, '/') > 0 {\n+ wd := args.WorkingDirectory\nif wd == \"\" {\nwd = \"/\"\n}\n@@ -49,10 +59,24 @@ func ResolveExecutablePath(ctx context.Context, creds *auth.Credentials, mns *fs\nreturn path.Join(wd, name), nil\n}\n- // Otherwise, We must lookup the name in the paths, starting from the\n- // calling context's root directory.\n- paths := getPath(envv)\n+ // Otherwise, We must lookup the name in the paths.\n+ paths := getPath(args.Envv)\n+ if kernel.VFS2Enabled {\n+ f, err := resolveVFS2(ctx, args.Credentials, args.MountNamespaceVFS2, paths, name)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error finding executable %q in PATH %v: %v\", name, paths, err)\n+ }\n+ return f, nil\n+ }\n+\n+ f, err := resolve(ctx, args.MountNamespace, paths, name)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"error finding executable %q in PATH %v: %v\", name, paths, err)\n+ }\n+ return f, 
nil\n+}\n+func resolve(ctx context.Context, mns *fs.MountNamespace, paths []string, name string) (string, error) {\nroot := fs.RootFromContext(ctx)\nif root == nil {\n// Caller has no root. Don't bother traversing anything.\n@@ -95,30 +119,7 @@ func ResolveExecutablePath(ctx context.Context, creds *auth.Credentials, mns *fs\nreturn \"\", syserror.ENOENT\n}\n-// ResolveExecutablePathVFS2 resolves the given executable name given the\n-// working dir and environment.\n-func ResolveExecutablePathVFS2(ctx context.Context, creds *auth.Credentials, mns *vfs.MountNamespace, envv []string, wd, name string) (string, error) {\n- // Absolute paths can be used directly.\n- if path.IsAbs(name) {\n- return name, nil\n- }\n-\n- // Paths with '/' in them should be joined to the working directory, or\n- // to the root if working directory is not set.\n- if strings.IndexByte(name, '/') > 0 {\n- if wd == \"\" {\n- wd = \"/\"\n- }\n- if !path.IsAbs(wd) {\n- return \"\", fmt.Errorf(\"working directory %q must be absolute\", wd)\n- }\n- return path.Join(wd, name), nil\n- }\n-\n- // Otherwise, We must lookup the name in the paths, starting from the\n- // calling context's root directory.\n- paths := getPath(envv)\n-\n+func resolveVFS2(ctx context.Context, creds *auth.Credentials, mns *vfs.MountNamespace, paths []string, name string) (string, error) {\nroot := mns.Root()\ndefer root.DecRef()\nfor _, p := range paths {\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/fs.go", "new_path": "runsc/boot/fs.go", "diff": "@@ -293,11 +293,11 @@ func setupContainerFS(ctx context.Context, conf *Config, mntr *containerMounter,\nprocArgs.MountNamespace = mns\n// Resolve the executable path from working dir and environment.\n- f, err := user.ResolveExecutablePath(ctx, procArgs.Credentials, procArgs.MountNamespace, procArgs.Envv, procArgs.WorkingDirectory, procArgs.Argv[0])\n+ resolved, err := user.ResolveExecutablePath(ctx, procArgs)\nif err != nil {\n- return fmt.Errorf(\"searching for executable %q, cwd: %q, envv: %q: %v\", procArgs.Argv[0], procArgs.WorkingDirectory, procArgs.Envv, err)\n+ return err\n}\n- procArgs.Filename = f\n+ procArgs.Filename = resolved\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/vfs.go", "new_path": "runsc/boot/vfs.go", "diff": "@@ -96,11 +96,11 @@ func setupContainerVFS2(ctx context.Context, conf *Config, mntr *containerMounte\nprocArgs.MountNamespaceVFS2 = mns\n// Resolve the executable path from working dir and environment.\n- f, err := user.ResolveExecutablePathVFS2(ctx, procArgs.Credentials, procArgs.MountNamespaceVFS2, procArgs.Envv, procArgs.WorkingDirectory, procArgs.Argv[0])\n+ resolved, err := user.ResolveExecutablePath(ctx, procArgs)\nif err != nil {\n- return fmt.Errorf(\"searching for executable %q, cwd: %q, envv: %q: %v\", procArgs.Argv[0], procArgs.WorkingDirectory, procArgs.Envv, err)\n+ return err\n}\n- procArgs.Filename = f\n+ procArgs.Filename = resolved\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/container_test.go", "new_path": "runsc/container/container_test.go", "diff": "@@ -20,6 +20,7 @@ import (\n\"fmt\"\n\"io\"\n\"io/ioutil\"\n+ \"math\"\n\"os\"\n\"path\"\n\"path/filepath\"\n@@ -53,9 +54,8 @@ func waitForProcessList(cont *Container, want []*control.Process) error {\nerr = fmt.Errorf(\"error getting process data from container: %v\", err)\nreturn &backoff.PermanentError{Err: err}\n}\n- if r, err := procListsEqual(got, want); !r {\n- return fmt.Errorf(\"container got process list: %s, want: %s: error: %v\",\n- 
procListToString(got), procListToString(want), err)\n+ if !procListsEqual(got, want) {\n+ return fmt.Errorf(\"container got process list: %s, want: %s\", procListToString(got), procListToString(want))\n}\nreturn nil\n}\n@@ -92,36 +92,72 @@ func blockUntilWaitable(pid int) error {\nreturn err\n}\n-// procListsEqual is used to check whether 2 Process lists are equal for all\n-// implemented fields.\n-func procListsEqual(got, want []*control.Process) (bool, error) {\n- if len(got) != len(want) {\n- return false, nil\n+// procListsEqual is used to check whether 2 Process lists are equal. Fields\n+// set to -1 in wants are ignored. Timestamp and threads fields are always\n+// ignored.\n+func procListsEqual(gots, wants []*control.Process) bool {\n+ if len(gots) != len(wants) {\n+ return false\n}\n- for i := range got {\n- pd1 := got[i]\n- pd2 := want[i]\n- // Zero out timing dependant fields.\n- pd1.Time = \"\"\n- pd1.STime = \"\"\n- pd1.C = 0\n- // Ignore TTY field too, since it's not relevant in the cases\n- // where we use this method. Tests that care about the TTY\n- // field should check for it themselves.\n- pd1.TTY = \"\"\n- pd1Json, err := control.ProcessListToJSON([]*control.Process{pd1})\n- if err != nil {\n- return false, err\n+ for i := range gots {\n+ got := gots[i]\n+ want := wants[i]\n+\n+ if want.UID != math.MaxUint32 && want.UID != got.UID {\n+ return false\n}\n- pd2Json, err := control.ProcessListToJSON([]*control.Process{pd2})\n- if err != nil {\n- return false, err\n+ if want.PID != -1 && want.PID != got.PID {\n+ return false\n+ }\n+ if want.PPID != -1 && want.PPID != got.PPID {\n+ return false\n+ }\n+ if len(want.TTY) != 0 && want.TTY != got.TTY {\n+ return false\n+ }\n+ if len(want.Cmd) != 0 && want.Cmd != got.Cmd {\n+ return false\n+ }\n+ }\n+ return true\n+}\n+\n+type processBuilder struct {\n+ process control.Process\n+}\n+\n+func newProcessBuilder() *processBuilder {\n+ return &processBuilder{\n+ process: control.Process{\n+ UID: math.MaxUint32,\n+ PID: -1,\n+ PPID: -1,\n+ },\n}\n- if pd1Json != pd2Json {\n- return false, nil\n}\n+\n+func (p *processBuilder) Cmd(cmd string) *processBuilder {\n+ p.process.Cmd = cmd\n+ return p\n+}\n+\n+func (p *processBuilder) PID(pid kernel.ThreadID) *processBuilder {\n+ p.process.PID = pid\n+ return p\n}\n- return true, nil\n+\n+func (p *processBuilder) PPID(ppid kernel.ThreadID) *processBuilder {\n+ p.process.PPID = ppid\n+ return p\n+}\n+\n+func (p *processBuilder) UID(uid auth.KUID) *processBuilder {\n+ p.process.UID = uid\n+ return p\n+}\n+\n+func (p *processBuilder) Process() *control.Process {\n+ return &p.process\n}\nfunc procListToString(pl []*control.Process) string {\n@@ -323,14 +359,7 @@ func TestLifecycle(t *testing.T) {\n// expectedPL lists the expected process state of the container.\nexpectedPL := []*control.Process{\n- {\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- Threads: []kernel.ThreadID{1},\n- },\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n}\n// Create the container.\nargs := Args{\n@@ -608,10 +637,14 @@ func doAppExitStatus(t *testing.T, vfs2 bool) {\n// TestExec verifies that a container can exec a new program.\nfunc TestExec(t *testing.T) {\n- for name, conf := range configsWithVFS2(t, overlay) {\n+ for name, conf := range configsWithVFS2(t, all...) 
{\nt.Run(name, func(t *testing.T) {\n- const uid = 343\n- spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"exec-test\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ cmd := fmt.Sprintf(\"ln -s /bin/true %q/symlink && sleep 100\", dir)\n+ spec := testutil.NewSpecWithArgs(\"sh\", \"-c\", cmd)\n_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\nif err != nil {\n@@ -634,29 +667,127 @@ func TestExec(t *testing.T) {\nt.Fatalf(\"error starting container: %v\", err)\n}\n- // expectedPL lists the expected process state of the container.\n+ // Wait until sleep is running to ensure the symlink was created.\nexpectedPL := []*control.Process{\n+ newProcessBuilder().Cmd(\"sh\").Process(),\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n+ }\n+ if err := waitForProcessList(cont, expectedPL); err != nil {\n+ t.Fatalf(\"waitForProcessList: %v\", err)\n+ }\n+\n+ for _, tc := range []struct {\n+ name string\n+ args control.ExecArgs\n+ }{\n+ {\n+ name: \"complete\",\n+ args: control.ExecArgs{\n+ Filename: \"/bin/true\",\n+ Argv: []string{\"/bin/true\"},\n+ },\n+ },\n{\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- Threads: []kernel.ThreadID{1},\n+ name: \"filename\",\n+ args: control.ExecArgs{\n+ Filename: \"/bin/true\",\n+ },\n},\n{\n- UID: uid,\n- PID: 2,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- Threads: []kernel.ThreadID{2},\n+ name: \"argv\",\n+ args: control.ExecArgs{\n+ Argv: []string{\"/bin/true\"},\n},\n+ },\n+ {\n+ name: \"filename resolution\",\n+ args: control.ExecArgs{\n+ Filename: \"true\",\n+ Envv: []string{\"PATH=/bin\"},\n+ },\n+ },\n+ {\n+ name: \"argv resolution\",\n+ args: control.ExecArgs{\n+ Argv: []string{\"true\"},\n+ Envv: []string{\"PATH=/bin\"},\n+ },\n+ },\n+ {\n+ name: \"argv symlink\",\n+ args: control.ExecArgs{\n+ Argv: []string{filepath.Join(dir, \"symlink\")},\n+ },\n+ },\n+ {\n+ name: \"working dir\",\n+ args: control.ExecArgs{\n+ Argv: []string{\"/bin/sh\", \"-c\", `if [[ \"${PWD}\" != \"/tmp\" ]]; then exit 1; fi`},\n+ WorkingDirectory: \"/tmp\",\n+ },\n+ },\n+ {\n+ name: \"user\",\n+ args: control.ExecArgs{\n+ Argv: []string{\"/bin/sh\", \"-c\", `if [[ \"$(id -u)\" != \"343\" ]]; then exit 1; fi`},\n+ KUID: 343,\n+ },\n+ },\n+ {\n+ name: \"group\",\n+ args: control.ExecArgs{\n+ Argv: []string{\"/bin/sh\", \"-c\", `if [[ \"$(id -g)\" != \"343\" ]]; then exit 1; fi`},\n+ KGID: 343,\n+ },\n+ },\n+ {\n+ name: \"env\",\n+ args: control.ExecArgs{\n+ Argv: []string{\"/bin/sh\", \"-c\", `if [[ \"${FOO}\" != \"123\" ]]; then exit 1; fi`},\n+ Envv: []string{\"FOO=123\"},\n+ },\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ // t.Parallel()\n+ if ws, err := cont.executeSync(&tc.args); err != nil {\n+ t.Fatalf(\"executeAsync(%+v): %v\", tc.args, err)\n+ } else if ws != 0 {\n+ t.Fatalf(\"executeAsync(%+v) failed with exit: %v\", tc.args, ws)\n+ }\n+ })\n+ }\n+ })\n+ }\n}\n- // Verify that \"sleep 100\" is running.\n- if err := waitForProcessList(cont, expectedPL[:1]); err != nil {\n- t.Error(err)\n+// TestExecProcList verifies that a container can exec a new program and it\n+// shows correcly in the process list.\n+func TestExecProcList(t *testing.T) {\n+ for name, conf := range configsWithVFS2(t, all...) 
{\n+ t.Run(name, func(t *testing.T) {\n+ const uid = 343\n+ spec := testutil.NewSpecWithArgs(\"sleep\", \"100\")\n+\n+ _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)\n+ if err != nil {\n+ t.Fatalf(\"error setting up container: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ // Create and start the container.\n+ args := Args{\n+ ID: testutil.RandomContainerID(),\n+ Spec: spec,\n+ BundleDir: bundleDir,\n+ }\n+ cont, err := New(conf, args)\n+ if err != nil {\n+ t.Fatalf(\"error creating container: %v\", err)\n+ }\n+ defer cont.Destroy()\n+ if err := cont.Start(conf); err != nil {\n+ t.Fatalf(\"error starting container: %v\", err)\n}\nexecArgs := &control.ExecArgs{\n@@ -666,9 +797,8 @@ func TestExec(t *testing.T) {\nKUID: uid,\n}\n- // Verify that \"sleep 100\" and \"sleep 5\" are running\n- // after exec. First, start running exec (whick\n- // blocks).\n+ // Verify that \"sleep 100\" and \"sleep 5\" are running after exec. First,\n+ // start running exec (which blocks).\nch := make(chan error)\ngo func() {\nexitStatus, err := cont.executeSync(execArgs)\n@@ -681,6 +811,11 @@ func TestExec(t *testing.T) {\n}\n}()\n+ // expectedPL lists the expected process state of the container.\n+ expectedPL := []*control.Process{\n+ newProcessBuilder().PID(1).PPID(0).Cmd(\"sleep\").UID(0).Process(),\n+ newProcessBuilder().PID(2).PPID(0).Cmd(\"sleep\").UID(uid).Process(),\n+ }\nif err := waitForProcessList(cont, expectedPL); err != nil {\nt.Fatalf(\"error waiting for processes: %v\", err)\n}\n@@ -1242,24 +1377,9 @@ func TestCapabilities(t *testing.T) {\n// expectedPL lists the expected process state of the container.\nexpectedPL := []*control.Process{\n- {\n- UID: 0,\n- PID: 1,\n- PPID: 0,\n- C: 0,\n- Cmd: \"sleep\",\n- Threads: []kernel.ThreadID{1},\n- },\n- {\n- UID: uid,\n- PID: 2,\n- PPID: 0,\n- C: 0,\n- Cmd: \"exe\",\n- Threads: []kernel.ThreadID{2},\n- },\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n}\n- if err := waitForProcessList(cont, expectedPL[:1]); err != nil {\n+ if err := waitForProcessList(cont, expectedPL); err != nil {\nt.Fatalf(\"Failed to wait for sleep to start, err: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -149,13 +149,13 @@ func TestMultiContainerSanity(t *testing.T) {\n// Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().PID(1).PPID(0).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\nexpectedPL = []*control.Process{\n- {PID: 2, Cmd: \"sleep\", Threads: []kernel.ThreadID{2}},\n+ newProcessBuilder().PID(2).PPID(0).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[1], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n@@ -195,13 +195,13 @@ func TestMultiPIDNS(t *testing.T) {\n// Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().PID(1).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\nexpectedPL = []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().PID(1).Cmd(\"sleep\").Process(),\n}\nif err 
:= waitForProcessList(containers[1], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n@@ -257,7 +257,7 @@ func TestMultiPIDNSPath(t *testing.T) {\n// Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().PID(1).PPID(0).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n@@ -267,7 +267,7 @@ func TestMultiPIDNSPath(t *testing.T) {\n}\nexpectedPL = []*control.Process{\n- {PID: 2, Cmd: \"sleep\", Threads: []kernel.ThreadID{2}},\n+ newProcessBuilder().PID(2).PPID(0).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[1], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n@@ -300,7 +300,7 @@ func TestMultiContainerWait(t *testing.T) {\n// Check via ps that multiple processes are running.\nexpectedPL := []*control.Process{\n- {PID: 2, Cmd: \"sleep\", Threads: []kernel.ThreadID{2}},\n+ newProcessBuilder().PID(2).PPID(0).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[1], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n@@ -345,7 +345,7 @@ func TestMultiContainerWait(t *testing.T) {\n// After Wait returns, ensure that the root container is running and\n// the child has finished.\nexpectedPL = []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Errorf(\"failed to wait for %q to start: %v\", strings.Join(containers[0].Spec.Process.Args, \" \"), err)\n@@ -377,7 +377,7 @@ func TestExecWait(t *testing.T) {\n// Check via ps that process is running.\nexpectedPL := []*control.Process{\n- {PID: 2, Cmd: \"sleep\", Threads: []kernel.ThreadID{2}},\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[1], expectedPL); err != nil {\nt.Fatalf(\"failed to wait for sleep to start: %v\", err)\n@@ -412,7 +412,7 @@ func TestExecWait(t *testing.T) {\n// Wait for the exec'd process to exit.\nexpectedPL = []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().PID(1).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Fatalf(\"failed to wait for second container to stop: %v\", err)\n@@ -498,9 +498,8 @@ func TestMultiContainerSignal(t *testing.T) {\n// Check via ps that container 1 process is running.\nexpectedPL := []*control.Process{\n- {PID: 2, Cmd: \"sleep\", Threads: []kernel.ThreadID{2}},\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n}\n-\nif err := waitForProcessList(containers[1], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n}\n@@ -512,7 +511,7 @@ func TestMultiContainerSignal(t *testing.T) {\n// Make sure process 1 is still running.\nexpectedPL = []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().PID(1).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[0], expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n@@ -626,8 +625,10 @@ func TestMultiContainerDestroy(t *testing.T) {\nif err != nil {\nt.Fatalf(\"error getting process data from sandbox: %v\", err)\n}\n- expectedPL := []*control.Process{{PID: 1, Cmd: \"sleep\", 
Threads: []kernel.ThreadID{1}}}\n- if r, err := procListsEqual(pss, expectedPL); !r {\n+ expectedPL := []*control.Process{\n+ newProcessBuilder().PID(1).Cmd(\"sleep\").Process(),\n+ }\n+ if !procListsEqual(pss, expectedPL) {\nt.Errorf(\"container got process list: %s, want: %s: error: %v\",\nprocListToString(pss), procListToString(expectedPL), err)\n}\n@@ -664,7 +665,7 @@ func TestMultiContainerProcesses(t *testing.T) {\n// Check root's container process list doesn't include other containers.\nexpectedPL0 := []*control.Process{\n- {PID: 1, Cmd: \"sleep\", Threads: []kernel.ThreadID{1}},\n+ newProcessBuilder().PID(1).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[0], expectedPL0); err != nil {\nt.Errorf(\"failed to wait for process to start: %v\", err)\n@@ -672,8 +673,8 @@ func TestMultiContainerProcesses(t *testing.T) {\n// Same for the other container.\nexpectedPL1 := []*control.Process{\n- {PID: 2, Cmd: \"sh\", Threads: []kernel.ThreadID{2}},\n- {PID: 3, PPID: 2, Cmd: \"sleep\", Threads: []kernel.ThreadID{3}},\n+ newProcessBuilder().PID(2).Cmd(\"sh\").Process(),\n+ newProcessBuilder().PID(3).PPID(2).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(containers[1], expectedPL1); err != nil {\nt.Errorf(\"failed to wait for process to start: %v\", err)\n@@ -687,7 +688,7 @@ func TestMultiContainerProcesses(t *testing.T) {\nif _, err := containers[1].Execute(args); err != nil {\nt.Fatalf(\"error exec'ing: %v\", err)\n}\n- expectedPL1 = append(expectedPL1, &control.Process{PID: 4, Cmd: \"sleep\", Threads: []kernel.ThreadID{4}})\n+ expectedPL1 = append(expectedPL1, newProcessBuilder().PID(4).Cmd(\"sleep\").Process())\nif err := waitForProcessList(containers[1], expectedPL1); err != nil {\nt.Errorf(\"failed to wait for process to start: %v\", err)\n}\n@@ -1505,7 +1506,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {\n// Ensure container is running\nc := containers[2]\nexpectedPL := []*control.Process{\n- {PID: 3, Cmd: \"sleep\", Threads: []kernel.ThreadID{3}},\n+ newProcessBuilder().PID(3).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(c, expectedPL); err != nil {\nt.Errorf(\"failed to wait for sleep to start: %v\", err)\n@@ -1533,7 +1534,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {\ncontinue // container[2] has been killed.\n}\npl := []*control.Process{\n- {PID: kernel.ThreadID(i + 1), Cmd: \"sleep\", Threads: []kernel.ThreadID{kernel.ThreadID(i + 1)}},\n+ newProcessBuilder().PID(kernel.ThreadID(i + 1)).Cmd(\"sleep\").Process(),\n}\nif err := waitForProcessList(c, pl); err != nil {\nt.Errorf(\"Container %q was affected by another container: %v\", c.ID, err)\n@@ -1553,7 +1554,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {\n// Wait until sandbox stops. waitForProcessList will loop until sandbox exits\n// and RPC errors out.\nimpossiblePL := []*control.Process{\n- {PID: 100, Cmd: \"non-existent-process\", Threads: []kernel.ThreadID{100}},\n+ newProcessBuilder().Cmd(\"non-existent-process\").Process(),\n}\nif err := waitForProcessList(c, impossiblePL); err == nil {\nt.Fatalf(\"Sandbox was not killed after gofer death\")\n" } ]
Go
Apache License 2.0
google/gvisor
Combine executable lookup code Run vs. exec, VFS1 vs. VFS2 were executable lookup were slightly different from each other. Combine them all into the same logic. PiperOrigin-RevId: 315426443
259,891
09.06.2020 10:45:31
25,200
20afd66e019bf0aaaf66e854135cd2c0fe0dfd92
Invoke bazel query via bash function.
[ { "change_type": "MODIFY", "old_path": "scripts/common_build.sh", "new_path": "scripts/common_build.sh", "diff": "@@ -63,6 +63,10 @@ function run_as_root() {\nbazel run --run_under=\"sudo\" \"${binary}\" -- \"$@\"\n}\n+function query() {\n+ QUERY_RESULT=$(bazel query \"$@\")\n+}\n+\nfunction collect_logs() {\n# Zip out everything into a convenient form.\nif [[ -v KOKORO_ARTIFACTS_DIR ]] && [[ -e bazel-testlogs ]]; then\n" }, { "change_type": "MODIFY", "old_path": "scripts/packetdrill_tests.sh", "new_path": "scripts/packetdrill_tests.sh", "diff": "@@ -19,4 +19,5 @@ source $(dirname $0)/common.sh\nmake load-packetdrill\ninstall_runsc_for_test runsc-d\n-test_runsc $(bazel query \"attr(tags, manual, tests(//test/packetdrill/...))\")\n+query \"attr(tags, manual, tests(//test/packetdrill/...))\"\n+test_runsc $QUERY_RESULT\n" }, { "change_type": "MODIFY", "old_path": "scripts/packetimpact_tests.sh", "new_path": "scripts/packetimpact_tests.sh", "diff": "@@ -19,4 +19,5 @@ source $(dirname $0)/common.sh\nmake load-packetimpact\ninstall_runsc_for_test runsc-d\n-test_runsc $(bazel query \"attr(tags, packetimpact, tests(//test/packetimpact/...))\")\n+query \"attr(tags, packetimpact, tests(//test/packetimpact/...))\"\n+test_runsc $QUERY_RESULT\n" } ]
Go
Apache License 2.0
google/gvisor
Invoke bazel query via bash function. PiperOrigin-RevId: 315514034
259,885
09.06.2020 11:14:24
25,200
ecff24930cb2dd5b0910da859d6e712f2f1d32c4
Ensure pgalloc.MemoryFile.fileSize is always chunk-aligned. findAvailableLocked() may return a non-aligned FileRange.End after expansion since it may round FileRange.Start down to a hugepage boundary.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/pgalloc/pgalloc.go", "new_path": "pkg/sentry/pgalloc/pgalloc.go", "diff": "@@ -393,16 +393,17 @@ func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (platform.Fi\nreturn platform.FileRange{}, syserror.ENOMEM\n}\n- // Expand the file if needed. Note that findAvailableRange will\n- // appropriately double the fileSize when required.\n+ // Expand the file if needed.\nif int64(fr.End) > f.fileSize {\n- if err := f.file.Truncate(int64(fr.End)); err != nil {\n+ // Round the new file size up to be chunk-aligned.\n+ newFileSize := (int64(fr.End) + chunkMask) &^ chunkMask\n+ if err := f.file.Truncate(newFileSize); err != nil {\nreturn platform.FileRange{}, err\n}\n- f.fileSize = int64(fr.End)\n+ f.fileSize = newFileSize\nf.mappingsMu.Lock()\noldMappings := f.mappings.Load().([]uintptr)\n- newMappings := make([]uintptr, f.fileSize>>chunkShift)\n+ newMappings := make([]uintptr, newFileSize>>chunkShift)\ncopy(newMappings, oldMappings)\nf.mappings.Store(newMappings)\nf.mappingsMu.Unlock()\n" } ]
Go
Apache License 2.0
google/gvisor
Ensure pgalloc.MemoryFile.fileSize is always chunk-aligned. findAvailableLocked() may return a non-aligned FileRange.End after expansion since it may round FileRange.Start down to a hugepage boundary. PiperOrigin-RevId: 315520321
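A minimal sketch of the chunk-alignment rounding described in the commit above. The chunk size below is a hypothetical placeholder (pgalloc defines its own constants); only the `(end + chunkMask) &^ chunkMask` expression is taken from the diff.

```go
package main

import "fmt"

const (
	chunkShift = 30 // hypothetical; pgalloc defines its own chunk size
	chunkSize  = int64(1) << chunkShift
	chunkMask  = chunkSize - 1
)

// alignUp rounds end up to the next chunk boundary, so the backing file size
// (and the per-chunk mappings slice) stays chunk-aligned even when the
// allocated FileRange.End is not.
func alignUp(end int64) int64 {
	return (end + chunkMask) &^ chunkMask
}

func main() {
	fmt.Println(alignUp(chunkSize+4096) / chunkSize) // 2 chunks
	fmt.Println(alignUp(chunkSize) / chunkSize)      // exactly 1 chunk, unchanged
}
```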
259,992
09.06.2020 16:28:58
25,200
0ae5bd24d72a6809a8ffb9c6a31fd2621180e3df
Mount root and volumes as read-only if --overlay is enabled
[ { "change_type": "MODIFY", "old_path": "runsc/cmd/gofer.go", "new_path": "runsc/cmd/gofer.go", "diff": "@@ -306,7 +306,7 @@ func setupRootFS(spec *specs.Spec, conf *boot.Config) error {\n}\n// Replace the current spec, with the clean spec with symlinks resolved.\n- if err := setupMounts(spec.Mounts, root); err != nil {\n+ if err := setupMounts(conf, spec.Mounts, root); err != nil {\nFatalf(\"error setting up FS: %v\", err)\n}\n@@ -322,7 +322,7 @@ func setupRootFS(spec *specs.Spec, conf *boot.Config) error {\n}\n// Check if root needs to be remounted as readonly.\n- if spec.Root.Readonly {\n+ if spec.Root.Readonly || conf.Overlay {\n// If root is a mount point but not read-only, we can change mount options\n// to make it read-only for extra safety.\nlog.Infof(\"Remounting root as readonly: %q\", root)\n@@ -346,7 +346,7 @@ func setupRootFS(spec *specs.Spec, conf *boot.Config) error {\n// setupMounts binds mount all mounts specified in the spec in their correct\n// location inside root. It will resolve relative paths and symlinks. It also\n// creates directories as needed.\n-func setupMounts(mounts []specs.Mount, root string) error {\n+func setupMounts(conf *boot.Config, mounts []specs.Mount, root string) error {\nfor _, m := range mounts {\nif m.Type != \"bind\" || !specutils.IsSupportedDevMount(m) {\ncontinue\n@@ -358,6 +358,11 @@ func setupMounts(mounts []specs.Mount, root string) error {\n}\nflags := specutils.OptionsToFlags(m.Options) | syscall.MS_BIND\n+ if conf.Overlay {\n+ // Force mount read-only if writes are not going to be sent to it.\n+ flags |= syscall.MS_RDONLY\n+ }\n+\nlog.Infof(\"Mounting src: %q, dst: %q, flags: %#x\", m.Source, dst, flags)\nif err := specutils.Mount(m.Source, dst, m.Type, flags); err != nil {\nreturn fmt.Errorf(\"mounting %v: %v\", m, err)\n" } ]
Go
Apache License 2.0
google/gvisor
Mount root and volumes as read-only if --overlay is enabled PiperOrigin-RevId: 315583963
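For context, a hedged sketch of the bind-mount pattern this change relies on, using only the standard syscall package; the paths are made up and this is not the runsc implementation. Per mount(2), most flags are ignored on the initial bind, so MS_RDONLY has to be applied with a follow-up MS_REMOUNT|MS_BIND pass — which is also why the root gets remounted read-only in the diff.

```go
package main

import (
	"log"
	"syscall"
)

// bindReadOnly bind-mounts src at dst, then remounts the bind read-only.
func bindReadOnly(src, dst string) error {
	if err := syscall.Mount(src, dst, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
		return err
	}
	flags := uintptr(syscall.MS_BIND | syscall.MS_REMOUNT | syscall.MS_RDONLY)
	return syscall.Mount("", dst, "", flags, "")
}

func main() {
	// Hypothetical paths; needs CAP_SYS_ADMIN (or a user namespace).
	if err := bindReadOnly("/tmp/src", "/tmp/dst"); err != nil {
		log.Fatal(err)
	}
}
```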
259,992
09.06.2020 16:34:42
25,200
6722b1e56fa63f3409f222a63241705aa3f3ace4
Don't WriteOut to readonly mounts When the file closes, it attempts to write dirty cached attributes to the file. This should not be done when the mount is readonly.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/gofer/inode.go", "new_path": "pkg/sentry/fs/gofer/inode.go", "diff": "@@ -640,7 +640,7 @@ func (i *inodeOperations) Allocate(ctx context.Context, inode *fs.Inode, offset,\n// WriteOut implements fs.InodeOperations.WriteOut.\nfunc (i *inodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error {\n- if !i.session().cachePolicy.cacheUAttrs(inode) {\n+ if inode.MountSource.Flags.ReadOnly || !i.session().cachePolicy.cacheUAttrs(inode) {\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/inode.go", "new_path": "pkg/sentry/fs/host/inode.go", "diff": "@@ -368,6 +368,9 @@ func (i *inodeOperations) Allocate(ctx context.Context, inode *fs.Inode, offset,\n// WriteOut implements fs.InodeOperations.WriteOut.\nfunc (i *inodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error {\n+ if inode.MountSource.Flags.ReadOnly {\n+ return nil\n+ }\n// Have we been using host kernel metadata caches?\nif !inode.MountSource.Flags.ForcePageCache || !canMap(inode) {\n// Then the metadata is already up to date on the host.\n" } ]
Go
Apache License 2.0
google/gvisor
Don't WriteOut to readonly mounts When the file closes, it attempts to write dirty cached attributes to the file. This should not be done when the mount is readonly. PiperOrigin-RevId: 315585058
259,992
10.06.2020 06:50:10
25,200
203dc121f6fb3122e3bc0f234f4b4e4b276fb3b0
Redirect TODOs to more specific issues Closes
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/loader/vdso.go", "new_path": "pkg/sentry/loader/vdso.go", "diff": "@@ -75,7 +75,7 @@ var _ fs.FileOperations = (*byteReader)(nil)\n// newByteReaderFile creates a fake file to read data from.\n//\n-// TODO(gvisor.dev/issue/1623): Convert to VFS2.\n+// TODO(gvisor.dev/issue/2921): Convert to VFS2.\nfunc newByteReaderFile(ctx context.Context, data []byte) *fs.File {\n// Create a fake inode.\ninode := fs.NewInode(\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "diff": "@@ -168,7 +168,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nerr := tmpfs.AddSeals(file, args[2].Uint())\nreturn 0, nil, err\ndefault:\n- // TODO(gvisor.dev/issue/1623): Everything else is not yet supported.\n+ // TODO(gvisor.dev/issue/2920): Everything else is not yet supported.\nreturn 0, nil, syserror.EINVAL\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Redirect TODOs to more specific issues Closes #1623 PiperOrigin-RevId: 315681993
259,881
10.06.2020 12:37:01
25,200
fadbfd83d9e7f7b00fcffdaf8532e006327c74ad
Include panic message in log
[ { "change_type": "MODIFY", "old_path": "pkg/p9/server.go", "new_path": "pkg/p9/server.go", "diff": "@@ -482,10 +482,10 @@ func (cs *connState) handle(m message) (r message) {\ndefer func() {\nif r == nil {\n// Don't allow a panic to propagate.\n- recover()\n+ err := recover()\n// Include a useful log message.\n- log.Warningf(\"panic in handler: %s\", debug.Stack())\n+ log.Warningf(\"panic in handler: %v\\n%s\", err, debug.Stack())\n// Wrap in an EFAULT error; we don't really have a\n// better way to describe this kind of error. It will\n" } ]
Go
Apache License 2.0
google/gvisor
Include panic message in log PiperOrigin-RevId: 315745386
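A standalone illustration of the pattern in the diff above: recover() returns the value passed to panic(), which is lost unless it is captured and logged together with the stack. This uses only the standard library and is independent of the p9 server code.

```go
package main

import (
	"log"
	"runtime/debug"
)

func handle() {
	defer func() {
		// recover() yields the panic value; a bare recover() would leave
		// only the stack trace in the log, with no message.
		if err := recover(); err != nil {
			log.Printf("panic in handler: %v\n%s", err, debug.Stack())
		}
	}()
	panic("boom")
}

func main() {
	handle()
}
```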
259,853
10.06.2020 13:25:17
25,200
a5a4f804879f8d1b5e6de6005aef6d3e14e7dca2
socket/unix: handle sendto address argument for connected sockets In case of SOCK_SEQPACKET, it has to be ignored. In case of SOCK_STREAM, EISCONN or EOPNOTSUPP has to be returned.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/unix/unix.go", "new_path": "pkg/sentry/socket/unix/unix.go", "diff": "@@ -459,6 +459,15 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b\nTo: nil,\n}\nif len(to) > 0 {\n+ switch s.stype {\n+ case linux.SOCK_SEQPACKET:\n+ to = nil\n+ case linux.SOCK_STREAM:\n+ if s.State() == linux.SS_CONNECTED {\n+ return 0, syserr.ErrAlreadyConnected\n+ }\n+ return 0, syserr.ErrNotSupported\n+ default:\nep, err := extractEndpoint(t, to)\nif err != nil {\nreturn 0, err\n@@ -470,6 +479,7 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b\nw.Control.Credentials = control.MakeCreds(t)\n}\n}\n+ }\nn, err := src.CopyInTo(t, &w)\nif err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix_seqpacket.cc", "new_path": "test/syscalls/linux/socket_unix_seqpacket.cc", "diff": "@@ -43,6 +43,24 @@ TEST_P(SeqpacketUnixSocketPairTest, ReadOneSideClosed) {\nSyscallSucceedsWithValue(0));\n}\n+TEST_P(SeqpacketUnixSocketPairTest, Sendto) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct sockaddr_un addr = {};\n+ addr.sun_family = AF_UNIX;\n+ constexpr char kPath[] = \"\\0nonexistent\";\n+ memcpy(addr.sun_path, kPath, sizeof(kPath));\n+\n+ constexpr char kStr[] = \"abc\";\n+ ASSERT_THAT(sendto(sockets->second_fd(), kStr, 3, 0, (struct sockaddr*)&addr,\n+ sizeof(addr)),\n+ SyscallSucceedsWithValue(3));\n+\n+ char data[10] = {};\n+ ASSERT_THAT(read(sockets->first_fd(), data, sizeof(data)),\n+ SyscallSucceedsWithValue(3));\n+}\n+\n} // namespace\n} // namespace testing\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_unix_stream.cc", "new_path": "test/syscalls/linux/socket_unix_stream.cc", "diff": "@@ -89,6 +89,20 @@ TEST_P(StreamUnixSocketPairTest, ReadOneSideClosedWithUnreadData) {\nSyscallFailsWithErrno(ECONNRESET));\n}\n+TEST_P(StreamUnixSocketPairTest, Sendto) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ struct sockaddr_un addr = {};\n+ addr.sun_family = AF_UNIX;\n+ constexpr char kPath[] = \"\\0nonexistent\";\n+ memcpy(addr.sun_path, kPath, sizeof(kPath));\n+\n+ constexpr char kStr[] = \"abc\";\n+ ASSERT_THAT(sendto(sockets->second_fd(), kStr, 3, 0, (struct sockaddr*)&addr,\n+ sizeof(addr)),\n+ SyscallFailsWithErrno(EISCONN));\n+}\n+\nINSTANTIATE_TEST_SUITE_P(\nAllUnixDomainSockets, StreamUnixSocketPairTest,\n::testing::ValuesIn(IncludeReversals(VecCat<SocketPairKind>(\n" } ]
Go
Apache License 2.0
google/gvisor
socket/unix: handle sendto address argument for connected sockets In case of SOCK_SEQPACKET, it has to be ignored. In case of SOCK_STREAM, EISCONN or EOPNOTSUPP has to be returned. PiperOrigin-RevId: 315755972
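A small sketch of the user-visible behavior being matched, written against golang.org/x/sys/unix (assumed available); the destination path is hypothetical. On a connected SOCK_SEQPACKET unix socket the sendto address is ignored, while on a connected SOCK_STREAM socket the call fails with EISCONN.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func trySendto(typ int) error {
	fds, err := unix.Socketpair(unix.AF_UNIX, typ, 0)
	if err != nil {
		return err
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// The destination does not exist; on a connected socket it is either
	// ignored (SEQPACKET) or rejected with EISCONN (STREAM).
	dst := &unix.SockaddrUnix{Name: "/tmp/does-not-exist.sock"}
	return unix.Sendto(fds[0], []byte("abc"), 0, dst)
}

func main() {
	fmt.Println("seqpacket:", trySendto(unix.SOCK_SEQPACKET)) // <nil>: address ignored
	fmt.Println("stream:   ", trySendto(unix.SOCK_STREAM))    // EISCONN on Linux
}
```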
259,896
10.06.2020 13:36:02
25,200
4b9652d63b319414e764696f1b77ee39cd36d96d
{S,G}etsockopt for TCP_KEEPCNT option. TCP_KEEPCNT is used to set the maximum keepalive probes to be sent before dropping the connection. WANT_LGTM=jchacon
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/tcp.go", "new_path": "pkg/abi/linux/tcp.go", "diff": "@@ -57,4 +57,5 @@ const (\nconst (\nMAX_TCP_KEEPIDLE = 32767\nMAX_TCP_KEEPINTVL = 32767\n+ MAX_TCP_KEEPCNT = 127\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/netstack.go", "new_path": "pkg/sentry/socket/netstack/netstack.go", "diff": "@@ -1246,6 +1246,18 @@ func getSockOptTCP(t *kernel.Task, ep commonEndpoint, name, outLen int) (interfa\nreturn int32(time.Duration(v) / time.Second), nil\n+ case linux.TCP_KEEPCNT:\n+ if outLen < sizeOfInt32 {\n+ return nil, syserr.ErrInvalidArgument\n+ }\n+\n+ v, err := ep.GetSockOptInt(tcpip.KeepaliveCountOption)\n+ if err != nil {\n+ return nil, syserr.TranslateNetstackError(err)\n+ }\n+\n+ return int32(v), nil\n+\ncase linux.TCP_USER_TIMEOUT:\nif outLen < sizeOfInt32 {\nreturn nil, syserr.ErrInvalidArgument\n@@ -1786,6 +1798,17 @@ func setSockOptTCP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) *\n}\nreturn syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.KeepaliveIntervalOption(time.Second * time.Duration(v))))\n+ case linux.TCP_KEEPCNT:\n+ if len(optVal) < sizeOfInt32 {\n+ return syserr.ErrInvalidArgument\n+ }\n+\n+ v := usermem.ByteOrder.Uint32(optVal)\n+ if v < 1 || v > linux.MAX_TCP_KEEPCNT {\n+ return syserr.ErrInvalidArgument\n+ }\n+ return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.KeepaliveCountOption, int(v)))\n+\ncase linux.TCP_USER_TIMEOUT:\nif len(optVal) < sizeOfInt32 {\nreturn syserr.ErrInvalidArgument\n@@ -2115,30 +2138,20 @@ func emitUnimplementedEventTCP(t *kernel.Task, name int) {\nswitch name {\ncase linux.TCP_CONGESTION,\nlinux.TCP_CORK,\n- linux.TCP_DEFER_ACCEPT,\nlinux.TCP_FASTOPEN,\nlinux.TCP_FASTOPEN_CONNECT,\nlinux.TCP_FASTOPEN_KEY,\nlinux.TCP_FASTOPEN_NO_COOKIE,\n- linux.TCP_KEEPCNT,\n- linux.TCP_KEEPIDLE,\n- linux.TCP_KEEPINTVL,\n- linux.TCP_LINGER2,\n- linux.TCP_MAXSEG,\nlinux.TCP_QUEUE_SEQ,\n- linux.TCP_QUICKACK,\nlinux.TCP_REPAIR,\nlinux.TCP_REPAIR_QUEUE,\nlinux.TCP_REPAIR_WINDOW,\nlinux.TCP_SAVED_SYN,\nlinux.TCP_SAVE_SYN,\n- linux.TCP_SYNCNT,\nlinux.TCP_THIN_DUPACK,\nlinux.TCP_THIN_LINEAR_TIMEOUTS,\nlinux.TCP_TIMESTAMP,\n- linux.TCP_ULP,\n- linux.TCP_USER_TIMEOUT,\n- linux.TCP_WINDOW_CLAMP:\n+ linux.TCP_ULP:\nt.Kernel().EmitUnimplementedEvent(t)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_tcp_generic.cc", "new_path": "test/syscalls/linux/socket_ip_tcp_generic.cc", "diff": "@@ -524,6 +524,7 @@ TEST_P(TCPSocketPairTest, SetTCPKeepintvlZero) {\n// Copied from include/net/tcp.h.\nconstexpr int MAX_TCP_KEEPIDLE = 32767;\nconstexpr int MAX_TCP_KEEPINTVL = 32767;\n+constexpr int MAX_TCP_KEEPCNT = 127;\nTEST_P(TCPSocketPairTest, SetTCPKeepidleAboveMax) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n@@ -575,6 +576,78 @@ TEST_P(TCPSocketPairTest, SetTCPKeepintvlToMax) {\nEXPECT_EQ(get, MAX_TCP_KEEPINTVL);\n}\n+TEST_P(TCPSocketPairTest, TCPKeepcountDefault) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int get = -1;\n+ socklen_t get_len = sizeof(get);\n+ EXPECT_THAT(\n+ getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &get, &get_len),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(get_len, sizeof(get));\n+ EXPECT_EQ(get, 9); // 9 keepalive probes.\n+}\n+\n+TEST_P(TCPSocketPairTest, SetTCPKeepcountZero) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ constexpr int kZero = 0;\n+ EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &kZero,\n+ 
sizeof(kZero)),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST_P(TCPSocketPairTest, SetTCPKeepcountAboveMax) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ constexpr int kAboveMax = MAX_TCP_KEEPCNT + 1;\n+ EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,\n+ &kAboveMax, sizeof(kAboveMax)),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\n+TEST_P(TCPSocketPairTest, SetTCPKeepcountToMax) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,\n+ &MAX_TCP_KEEPCNT, sizeof(MAX_TCP_KEEPCNT)),\n+ SyscallSucceedsWithValue(0));\n+\n+ int get = -1;\n+ socklen_t get_len = sizeof(get);\n+ EXPECT_THAT(\n+ getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &get, &get_len),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(get_len, sizeof(get));\n+ EXPECT_EQ(get, MAX_TCP_KEEPCNT);\n+}\n+\n+TEST_P(TCPSocketPairTest, SetTCPKeepcountToOne) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int keepaliveCount = 1;\n+ EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,\n+ &keepaliveCount, sizeof(keepaliveCount)),\n+ SyscallSucceedsWithValue(0));\n+\n+ int get = -1;\n+ socklen_t get_len = sizeof(get);\n+ EXPECT_THAT(\n+ getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT, &get, &get_len),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(get_len, sizeof(get));\n+ EXPECT_EQ(get, keepaliveCount);\n+}\n+\n+TEST_P(TCPSocketPairTest, SetTCPKeepcountToNegative) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int keepaliveCount = -5;\n+ EXPECT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_KEEPCNT,\n+ &keepaliveCount, sizeof(keepaliveCount)),\n+ SyscallFailsWithErrno(EINVAL));\n+}\n+\nTEST_P(TCPSocketPairTest, SetOOBInline) {\nauto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n" } ]
Go
Apache License 2.0
google/gvisor
{S,G}etsockopt for TCP_KEEPCNT option. TCP_KEEPCNT is used to set the maximum keepalive probes to be sent before dropping the connection. WANT_LGTM=jchacon PiperOrigin-RevId: 315758094
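A usage sketch from the application side, using golang.org/x/sys/unix (assumed available): setting and reading TCP_KEEPCNT on a TCP socket. The value range follows the Linux limit referenced above (1..127, MAX_TCP_KEEPCNT).

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_TCP)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Drop the connection after 5 unanswered keepalive probes.
	// Linux accepts 1..127; zero or negative values return EINVAL.
	if err := unix.SetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_KEEPCNT, 5); err != nil {
		log.Fatal(err)
	}

	got, err := unix.GetsockoptInt(fd, unix.IPPROTO_TCP, unix.TCP_KEEPCNT)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("TCP_KEEPCNT = %d", got) // 5
}
```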
259,896
10.06.2020 15:05:20
25,200
9338854ea31059d6b6b5bf59a12512455b632f49
Fix the error code for syscall test with null TOS. The setsockopt with nullptr can fail with either EFAULT or zero.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_unbound.cc", "new_path": "test/syscalls/linux/socket_ip_unbound.cc", "diff": "@@ -377,8 +377,11 @@ TEST_P(IPUnboundSocketTest, NullTOS) {\n//\n// Linux's implementation would need fixing as passing a nullptr as optval\n// and non-zero optlen may not be valid.\n- EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),\n- SyscallSucceedsWithValue(0));\n+ // TODO(b/158666797): Combine the gVisor and linux cases for IPv6.\n+ // Some kernel versions return EFAULT, so we handle both.\n+ EXPECT_THAT(\n+ setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),\n+ AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(0)));\n}\n}\nsocklen_t get_sz = sizeof(int);\n" } ]
Go
Apache License 2.0
google/gvisor
Fix the error code for syscall test with null TOS. The setsockopt with nullptr can fail with either EFAULT or zero. PiperOrigin-RevId: 315777107
259,992
10.06.2020 16:28:20
25,200
41d9e536d559a2066ed6c92ed30aa2757d3596bb
Skip pids.max if value is zero LinuxPids.Limit is the only optional cgroup field in OCI that is not a pointer. If value is 0 or negative it should be skipped.
[ { "change_type": "MODIFY", "old_path": "runsc/cgroup/cgroup.go", "new_path": "runsc/cgroup/cgroup.go", "diff": "@@ -545,7 +545,7 @@ func (*networkPrio) set(spec *specs.LinuxResources, path string) error {\ntype pids struct{}\nfunc (*pids) set(spec *specs.LinuxResources, path string) error {\n- if spec.Pids == nil {\n+ if spec.Pids == nil || spec.Pids.Limit <= 0 {\nreturn nil\n}\nval := strconv.FormatInt(spec.Pids.Limit, 10)\n" } ]
Go
Apache License 2.0
google/gvisor
Skip pids.max if value is zero LinuxPids.Limit is the only optional cgroup field in OCI that is not a pointer. If value is 0 or negative it should be skipped. PiperOrigin-RevId: 315791909
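The reasoning above boils down to treating a zero Limit as "unset". A tiny sketch of that guard against the OCI runtime-spec Go types (specs-go package, assumed vendored), kept separate from the runsc code:

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// pidsMax returns the value to write to pids.max, or "" to skip the file.
// LinuxPids.Limit is a plain int64 (not a pointer), so "absent" can only be
// encoded as a value <= 0.
func pidsMax(res *specs.LinuxResources) string {
	if res == nil || res.Pids == nil || res.Pids.Limit <= 0 {
		return ""
	}
	return fmt.Sprintf("%d", res.Pids.Limit)
}

func main() {
	fmt.Printf("%q\n", pidsMax(&specs.LinuxResources{Pids: &specs.LinuxPids{}}))          // "" (skip)
	fmt.Printf("%q\n", pidsMax(&specs.LinuxResources{Pids: &specs.LinuxPids{Limit: 64}})) // "64"
}
```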
259,992
10.06.2020 17:03:38
25,200
ab4c85189313097dbac5d5531f9ff6a08d9ba289
Cgroup fixes - Set hugetlb related fields - Add realtime scheduler related fields - Beef up unit tests Updates
[ { "change_type": "MODIFY", "old_path": "runsc/cgroup/BUILD", "new_path": "runsc/cgroup/BUILD", "diff": "@@ -20,4 +20,8 @@ go_test(\nsrcs = [\"cgroup_test.go\"],\nlibrary = \":cgroup\",\ntags = [\"local\"],\n+ deps = [\n+ \"//pkg/test/testutil\",\n+ \"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n+ ],\n)\n" }, { "change_type": "MODIFY", "old_path": "runsc/cgroup/cgroup.go", "new_path": "runsc/cgroup/cgroup.go", "diff": "@@ -43,6 +43,7 @@ var controllers = map[string]config{\n\"blkio\": config{ctrlr: &blockIO{}},\n\"cpu\": config{ctrlr: &cpu{}},\n\"cpuset\": config{ctrlr: &cpuSet{}},\n+ \"hugetlb\": config{ctrlr: &hugeTLB{}, optional: true},\n\"memory\": config{ctrlr: &memory{}},\n\"net_cls\": config{ctrlr: &networkClass{}},\n\"net_prio\": config{ctrlr: &networkPrio{}},\n@@ -52,7 +53,6 @@ var controllers = map[string]config{\n// irrelevant for a sandbox.\n\"devices\": config{ctrlr: &noop{}},\n\"freezer\": config{ctrlr: &noop{}},\n- \"hugetlb\": config{ctrlr: &noop{}, optional: true},\n\"perf_event\": config{ctrlr: &noop{}},\n\"rdma\": config{ctrlr: &noop{}, optional: true},\n\"systemd\": config{ctrlr: &noop{}},\n@@ -446,7 +446,13 @@ func (*cpu) set(spec *specs.LinuxResources, path string) error {\nif err := setOptionalValueInt(path, \"cpu.cfs_quota_us\", spec.CPU.Quota); err != nil {\nreturn err\n}\n- return setOptionalValueUint(path, \"cpu.cfs_period_us\", spec.CPU.Period)\n+ if err := setOptionalValueUint(path, \"cpu.cfs_period_us\", spec.CPU.Period); err != nil {\n+ return err\n+ }\n+ if err := setOptionalValueUint(path, \"cpu.rt_period_us\", spec.CPU.RealtimePeriod); err != nil {\n+ return err\n+ }\n+ return setOptionalValueInt(path, \"cpu.rt_runtime_us\", spec.CPU.RealtimeRuntime)\n}\ntype cpuSet struct{}\n@@ -487,15 +493,19 @@ func (*blockIO) set(spec *specs.LinuxResources, path string) error {\n}\nfor _, dev := range spec.BlockIO.WeightDevice {\n- val := fmt.Sprintf(\"%d:%d %d\", dev.Major, dev.Minor, dev.Weight)\n+ if dev.Weight != nil {\n+ val := fmt.Sprintf(\"%d:%d %d\", dev.Major, dev.Minor, *dev.Weight)\nif err := setValue(path, \"blkio.weight_device\", val); err != nil {\nreturn err\n}\n- val = fmt.Sprintf(\"%d:%d %d\", dev.Major, dev.Minor, dev.LeafWeight)\n+ }\n+ if dev.LeafWeight != nil {\n+ val := fmt.Sprintf(\"%d:%d %d\", dev.Major, dev.Minor, *dev.LeafWeight)\nif err := setValue(path, \"blkio.leaf_weight_device\", val); err != nil {\nreturn err\n}\n}\n+ }\nif err := setThrottle(path, \"blkio.throttle.read_bps_device\", spec.BlockIO.ThrottleReadBpsDevice); err != nil {\nreturn err\n}\n@@ -551,3 +561,16 @@ func (*pids) set(spec *specs.LinuxResources, path string) error {\nval := strconv.FormatInt(spec.Pids.Limit, 10)\nreturn setValue(path, \"pids.max\", val)\n}\n+\n+type hugeTLB struct{}\n+\n+func (*hugeTLB) set(spec *specs.LinuxResources, path string) error {\n+ for _, limit := range spec.HugepageLimits {\n+ name := fmt.Sprintf(\"hugetlb.%s.limit_in_bytes\", limit.Pagesize)\n+ val := strconv.FormatUint(limit.Limit, 10)\n+ if err := setValue(path, name, val); err != nil {\n+ return err\n+ }\n+ }\n+ return nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "runsc/cgroup/cgroup_test.go", "new_path": "runsc/cgroup/cgroup_test.go", "diff": "package cgroup\nimport (\n+ \"io/ioutil\"\n+ \"os\"\n+ \"path/filepath\"\n+ \"strings\"\n\"testing\"\n+\n+ specs \"github.com/opencontainers/runtime-spec/specs-go\"\n+ \"gvisor.dev/gvisor/pkg/test/testutil\"\n)\nfunc TestUninstallEnoent(t *testing.T) {\n@@ -65,3 +72,578 @@ func TestCountCpuset(t *testing.T) 
{\n})\n}\n}\n+\n+func uint16Ptr(v uint16) *uint16 {\n+ return &v\n+}\n+\n+func uint32Ptr(v uint32) *uint32 {\n+ return &v\n+}\n+\n+func int64Ptr(v int64) *int64 {\n+ return &v\n+}\n+\n+func uint64Ptr(v uint64) *uint64 {\n+ return &v\n+}\n+\n+func boolPtr(v bool) *bool {\n+ return &v\n+}\n+\n+func checkDir(t *testing.T, dir string, contents map[string]string) {\n+ all, err := ioutil.ReadDir(dir)\n+ if err != nil {\n+ t.Fatalf(\"ReadDir(%q): %v\", dir, err)\n+ }\n+ fileCount := 0\n+ for _, file := range all {\n+ if file.IsDir() {\n+ // Only want to compare files.\n+ continue\n+ }\n+ fileCount++\n+\n+ want, ok := contents[file.Name()]\n+ if !ok {\n+ t.Errorf(\"file not expected: %q\", file.Name())\n+ continue\n+ }\n+ gotBytes, err := ioutil.ReadFile(filepath.Join(dir, file.Name()))\n+ if err != nil {\n+ t.Fatal(err.Error())\n+ }\n+ got := strings.TrimSuffix(string(gotBytes), \"\\n\")\n+ if got != want {\n+ t.Errorf(\"wrong file content, file: %q, want: %q, got: %q\", file.Name(), want, got)\n+ }\n+ }\n+ if fileCount != len(contents) {\n+ t.Errorf(\"file is missing, want: %v, got: %v\", contents, all)\n+ }\n+}\n+\n+func makeLinuxWeightDevice(major, minor int64, weight, leafWeight *uint16) specs.LinuxWeightDevice {\n+ rv := specs.LinuxWeightDevice{\n+ Weight: weight,\n+ LeafWeight: leafWeight,\n+ }\n+ rv.Major = major\n+ rv.Minor = minor\n+ return rv\n+}\n+\n+func makeLinuxThrottleDevice(major, minor int64, rate uint64) specs.LinuxThrottleDevice {\n+ rv := specs.LinuxThrottleDevice{\n+ Rate: rate,\n+ }\n+ rv.Major = major\n+ rv.Minor = minor\n+ return rv\n+}\n+\n+func TestBlockIO(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxBlockIO\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"simple\",\n+ spec: &specs.LinuxBlockIO{\n+ Weight: uint16Ptr(1),\n+ LeafWeight: uint16Ptr(2),\n+ },\n+ wants: map[string]string{\n+ \"blkio.weight\": \"1\",\n+ \"blkio.leaf_weight\": \"2\",\n+ },\n+ },\n+ {\n+ name: \"weight_device\",\n+ spec: &specs.LinuxBlockIO{\n+ WeightDevice: []specs.LinuxWeightDevice{\n+ makeLinuxWeightDevice(1, 2, uint16Ptr(3), uint16Ptr(4)),\n+ },\n+ },\n+ wants: map[string]string{\n+ \"blkio.weight_device\": \"1:2 3\",\n+ \"blkio.leaf_weight_device\": \"1:2 4\",\n+ },\n+ },\n+ {\n+ name: \"weight_device_nil_values\",\n+ spec: &specs.LinuxBlockIO{\n+ WeightDevice: []specs.LinuxWeightDevice{\n+ makeLinuxWeightDevice(1, 2, nil, nil),\n+ },\n+ },\n+ },\n+ {\n+ name: \"throttle\",\n+ spec: &specs.LinuxBlockIO{\n+ ThrottleReadBpsDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(1, 2, 3),\n+ },\n+ ThrottleReadIOPSDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(4, 5, 6),\n+ },\n+ ThrottleWriteBpsDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(7, 8, 9),\n+ },\n+ ThrottleWriteIOPSDevice: []specs.LinuxThrottleDevice{\n+ makeLinuxThrottleDevice(10, 11, 12),\n+ },\n+ },\n+ wants: map[string]string{\n+ \"blkio.throttle.read_bps_device\": \"1:2 3\",\n+ \"blkio.throttle.read_iops_device\": \"4:5 6\",\n+ \"blkio.throttle.write_bps_device\": \"7:8 9\",\n+ \"blkio.throttle.write_iops_device\": \"10:11 12\",\n+ },\n+ },\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxBlockIO{},\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ BlockIO: tc.spec,\n+ }\n+ ctrlr := blockIO{}\n+ if err := 
ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n+\n+func TestCPU(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxCPU\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"all\",\n+ spec: &specs.LinuxCPU{\n+ Shares: uint64Ptr(1),\n+ Quota: int64Ptr(2),\n+ Period: uint64Ptr(3),\n+ RealtimeRuntime: int64Ptr(4),\n+ RealtimePeriod: uint64Ptr(5),\n+ },\n+ wants: map[string]string{\n+ \"cpu.shares\": \"1\",\n+ \"cpu.cfs_quota_us\": \"2\",\n+ \"cpu.cfs_period_us\": \"3\",\n+ \"cpu.rt_runtime_us\": \"4\",\n+ \"cpu.rt_period_us\": \"5\",\n+ },\n+ },\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxCPU{},\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ CPU: tc.spec,\n+ }\n+ ctrlr := cpu{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n+\n+func TestCPUSet(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxCPU\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"all\",\n+ spec: &specs.LinuxCPU{\n+ Cpus: \"foo\",\n+ Mems: \"bar\",\n+ },\n+ wants: map[string]string{\n+ \"cpuset.cpus\": \"foo\",\n+ \"cpuset.mems\": \"bar\",\n+ },\n+ },\n+ // Don't test nil values because they are copied from the parent.\n+ // See TestCPUSetAncestor().\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ CPU: tc.spec,\n+ }\n+ ctrlr := cpuSet{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n+\n+// TestCPUSetAncestor checks that, when not available, value is read from\n+// parent directory.\n+func TestCPUSetAncestor(t *testing.T) {\n+ // Prepare master directory with cgroup files that will be propagated to\n+ // children.\n+ grandpa, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(grandpa)\n+\n+ if err := ioutil.WriteFile(filepath.Join(grandpa, \"cpuset.cpus\"), []byte(\"parent-cpus\"), 0666); err != nil {\n+ t.Fatalf(\"ioutil.WriteFile(): %v\", err)\n+ }\n+ if err := ioutil.WriteFile(filepath.Join(grandpa, \"cpuset.mems\"), []byte(\"parent-mems\"), 0666); err != nil {\n+ t.Fatalf(\"ioutil.WriteFile(): %v\", err)\n+ }\n+\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxCPU\n+ }{\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxCPU{},\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ // Create empty files in intermediate directory. 
They should be ignored\n+ // when reading, and then populated from parent.\n+ parent, err := ioutil.TempDir(grandpa, \"parent\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(parent)\n+ if _, err := os.Create(filepath.Join(parent, \"cpuset.cpus\")); err != nil {\n+ t.Fatalf(\"os.Create(): %v\", err)\n+ }\n+ if _, err := os.Create(filepath.Join(parent, \"cpuset.mems\")); err != nil {\n+ t.Fatalf(\"os.Create(): %v\", err)\n+ }\n+\n+ // cgroup files mmust exist.\n+ dir, err := ioutil.TempDir(parent, \"child\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ if _, err := os.Create(filepath.Join(dir, \"cpuset.cpus\")); err != nil {\n+ t.Fatalf(\"os.Create(): %v\", err)\n+ }\n+ if _, err := os.Create(filepath.Join(dir, \"cpuset.mems\")); err != nil {\n+ t.Fatalf(\"os.Create(): %v\", err)\n+ }\n+\n+ spec := &specs.LinuxResources{\n+ CPU: tc.spec,\n+ }\n+ ctrlr := cpuSet{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ want := map[string]string{\n+ \"cpuset.cpus\": \"parent-cpus\",\n+ \"cpuset.mems\": \"parent-mems\",\n+ }\n+ // Both path and dir must have been populated from grandpa.\n+ checkDir(t, parent, want)\n+ checkDir(t, dir, want)\n+ })\n+ }\n+}\n+\n+func TestHugeTlb(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec []specs.LinuxHugepageLimit\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"single\",\n+ spec: []specs.LinuxHugepageLimit{\n+ {\n+ Pagesize: \"1G\",\n+ Limit: 123,\n+ },\n+ },\n+ wants: map[string]string{\n+ \"hugetlb.1G.limit_in_bytes\": \"123\",\n+ },\n+ },\n+ {\n+ name: \"multiple\",\n+ spec: []specs.LinuxHugepageLimit{\n+ {\n+ Pagesize: \"1G\",\n+ Limit: 123,\n+ },\n+ {\n+ Pagesize: \"2G\",\n+ Limit: 456,\n+ },\n+ {\n+ Pagesize: \"1P\",\n+ Limit: 789,\n+ },\n+ },\n+ wants: map[string]string{\n+ \"hugetlb.1G.limit_in_bytes\": \"123\",\n+ \"hugetlb.2G.limit_in_bytes\": \"456\",\n+ \"hugetlb.1P.limit_in_bytes\": \"789\",\n+ },\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ HugepageLimits: tc.spec,\n+ }\n+ ctrlr := hugeTLB{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n+\n+func TestMemory(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxMemory\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"all\",\n+ spec: &specs.LinuxMemory{\n+ Limit: int64Ptr(1),\n+ Reservation: int64Ptr(2),\n+ Swap: int64Ptr(3),\n+ Kernel: int64Ptr(4),\n+ KernelTCP: int64Ptr(5),\n+ Swappiness: uint64Ptr(6),\n+ DisableOOMKiller: boolPtr(true),\n+ },\n+ wants: map[string]string{\n+ \"memory.limit_in_bytes\": \"1\",\n+ \"memory.soft_limit_in_bytes\": \"2\",\n+ \"memory.memsw.limit_in_bytes\": \"3\",\n+ \"memory.kmem.limit_in_bytes\": \"4\",\n+ \"memory.kmem.tcp.limit_in_bytes\": \"5\",\n+ \"memory.swappiness\": \"6\",\n+ \"memory.oom_control\": \"1\",\n+ },\n+ },\n+ {\n+ // Disable OOM killer should only write when set to true.\n+ name: \"oomkiller\",\n+ spec: &specs.LinuxMemory{\n+ DisableOOMKiller: boolPtr(false),\n+ },\n+ },\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxMemory{},\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, 
err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ Memory: tc.spec,\n+ }\n+ ctrlr := memory{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n+\n+func TestNetworkClass(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxNetwork\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"all\",\n+ spec: &specs.LinuxNetwork{\n+ ClassID: uint32Ptr(1),\n+ },\n+ wants: map[string]string{\n+ \"net_cls.classid\": \"1\",\n+ },\n+ },\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxNetwork{},\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ Network: tc.spec,\n+ }\n+ ctrlr := networkClass{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n+\n+func TestNetworkPriority(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxNetwork\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"all\",\n+ spec: &specs.LinuxNetwork{\n+ Priorities: []specs.LinuxInterfacePriority{\n+ {\n+ Name: \"foo\",\n+ Priority: 1,\n+ },\n+ },\n+ },\n+ wants: map[string]string{\n+ \"net_prio.ifpriomap\": \"foo 1\",\n+ },\n+ },\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxNetwork{},\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ Network: tc.spec,\n+ }\n+ ctrlr := networkPrio{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n+\n+func TestPids(t *testing.T) {\n+ for _, tc := range []struct {\n+ name string\n+ spec *specs.LinuxPids\n+ wants map[string]string\n+ }{\n+ {\n+ name: \"all\",\n+ spec: &specs.LinuxPids{Limit: 1},\n+ wants: map[string]string{\n+ \"pids.max\": \"1\",\n+ },\n+ },\n+ {\n+ name: \"nil_values\",\n+ spec: &specs.LinuxPids{},\n+ },\n+ {\n+ name: \"nil\",\n+ },\n+ } {\n+ t.Run(tc.name, func(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"cgroup\")\n+ if err != nil {\n+ t.Fatalf(\"error creating temporary directory: %v\", err)\n+ }\n+ defer os.RemoveAll(dir)\n+\n+ spec := &specs.LinuxResources{\n+ Pids: tc.spec,\n+ }\n+ ctrlr := pids{}\n+ if err := ctrlr.set(spec, dir); err != nil {\n+ t.Fatalf(\"ctrlr.set(): %v\", err)\n+ }\n+ checkDir(t, dir, tc.wants)\n+ })\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Cgroup fixes - Set hugetlb related fields - Add realtime scheduler related fields - Beef up unit tests Updates #2713 PiperOrigin-RevId: 315797979
259,881
11.06.2020 09:11:44
25,200
0c7a5bc69c19de59c426045d10a56613f992f0dd
Add nogo TODO.
[ { "change_type": "MODIFY", "old_path": "tools/defs.bzl", "new_path": "tools/defs.bzl", "diff": "@@ -96,6 +96,7 @@ def go_imports(name, src, out):\ncmd = (\"$(location @org_golang_x_tools//cmd/goimports:goimports) $(SRCS) > $@\"),\n)\n+# TODO(b/158696872): Enable nogo by default.\ndef go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = False, marshal_debug = False, nogo = False, **kwargs):\n\"\"\"Wraps the standard go_library and does stateification and marshalling.\n" } ]
Go
Apache License 2.0
google/gvisor
Add nogo TODO. PiperOrigin-RevId: 315911025
259,992
11.06.2020 14:55:04
25,200
d58d57606a460d49b8870d2c48cb75f662f65fda
Don't copy structs with sync.Mutex during initialization During initialization the inode struct was copied around, but it isn't good practice to copy it since it contains a ref count and a sync.Mutex. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/block_map_file.go", "new_path": "pkg/sentry/fsimpl/ext/block_map_file.go", "diff": "@@ -58,15 +58,16 @@ var _ io.ReaderAt = (*blockMapFile)(nil)\n// newBlockMapFile is the blockMapFile constructor. It initializes the file to\n// physical blocks map with (at most) the first 12 (direct) blocks.\n-func newBlockMapFile(regFile regularFile) (*blockMapFile, error) {\n- file := &blockMapFile{regFile: regFile}\n+func newBlockMapFile(args inodeArgs) (*blockMapFile, error) {\n+ file := &blockMapFile{}\nfile.regFile.impl = file\n+ file.regFile.inode.init(args, &file.regFile)\nfor i := uint(0); i < 4; i++ {\n- file.coverage[i] = getCoverage(regFile.inode.blkSize, i)\n+ file.coverage[i] = getCoverage(file.regFile.inode.blkSize, i)\n}\n- blkMap := regFile.inode.diskInode.Data()\n+ blkMap := file.regFile.inode.diskInode.Data()\nbinary.Unmarshal(blkMap[:numDirectBlks*4], binary.LittleEndian, &file.directBlks)\nbinary.Unmarshal(blkMap[numDirectBlks*4:(numDirectBlks+1)*4], binary.LittleEndian, &file.indirectBlk)\nbinary.Unmarshal(blkMap[(numDirectBlks+1)*4:(numDirectBlks+2)*4], binary.LittleEndian, &file.doubleIndirectBlk)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/block_map_test.go", "new_path": "pkg/sentry/fsimpl/ext/block_map_test.go", "diff": "@@ -85,20 +85,6 @@ func (n *blkNumGen) next() uint32 {\n// the inode covers and that is written to disk.\nfunc blockMapSetUp(t *testing.T) (*blockMapFile, []byte) {\nmockDisk := make([]byte, mockBMDiskSize)\n- regFile := regularFile{\n- inode: inode{\n- fs: &filesystem{\n- dev: bytes.NewReader(mockDisk),\n- },\n- diskInode: &disklayout.InodeNew{\n- InodeOld: disklayout.InodeOld{\n- SizeLo: getMockBMFileFize(),\n- },\n- },\n- blkSize: uint64(mockBMBlkSize),\n- },\n- }\n-\nvar fileData []byte\nblkNums := newBlkNumGen()\nvar data []byte\n@@ -125,9 +111,20 @@ func blockMapSetUp(t *testing.T) (*blockMapFile, []byte) {\ndata = binary.Marshal(data, binary.LittleEndian, triplyIndirectBlk)\nfileData = append(fileData, writeFileDataToBlock(mockDisk, triplyIndirectBlk, 3, blkNums)...)\n- copy(regFile.inode.diskInode.Data(), data)\n+ args := inodeArgs{\n+ fs: &filesystem{\n+ dev: bytes.NewReader(mockDisk),\n+ },\n+ diskInode: &disklayout.InodeNew{\n+ InodeOld: disklayout.InodeOld{\n+ SizeLo: getMockBMFileFize(),\n+ },\n+ },\n+ blkSize: uint64(mockBMBlkSize),\n+ }\n+ copy(args.diskInode.Data(), data)\n- mockFile, err := newBlockMapFile(regFile)\n+ mockFile, err := newBlockMapFile(args)\nif err != nil {\nt.Fatalf(\"newBlockMapFile failed: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/directory.go", "new_path": "pkg/sentry/fsimpl/ext/directory.go", "diff": "@@ -54,16 +54,15 @@ type directory struct {\n}\n// newDirectory is the directory constructor.\n-func newDirectory(inode inode, newDirent bool) (*directory, error) {\n+func newDirectory(args inodeArgs, newDirent bool) (*directory, error) {\nfile := &directory{\n- inode: inode,\nchildCache: make(map[string]*dentry),\nchildMap: make(map[string]*dirent),\n}\n- file.inode.impl = file\n+ file.inode.init(args, file)\n// Initialize childList by reading dirents from the underlying file.\n- if inode.diskInode.Flags().Index {\n+ if args.diskInode.Flags().Index {\n// TODO(b/134676337): Support hash tree directories. Currently only the '.'\n// and '..' 
entries are read in.\n@@ -74,7 +73,7 @@ func newDirectory(inode inode, newDirent bool) (*directory, error) {\n// The dirents are organized in a linear array in the file data.\n// Extract the file data and decode the dirents.\n- regFile, err := newRegularFile(inode)\n+ regFile, err := newRegularFile(args)\nif err != nil {\nreturn nil, err\n}\n@@ -82,7 +81,7 @@ func newDirectory(inode inode, newDirent bool) (*directory, error) {\n// buf is used as scratch space for reading in dirents from disk and\n// unmarshalling them into dirent structs.\nbuf := make([]byte, disklayout.DirentSize)\n- size := inode.diskInode.Size()\n+ size := args.diskInode.Size()\nfor off, inc := uint64(0), uint64(0); off < size; off += inc {\ntoRead := size - off\nif toRead > disklayout.DirentSize {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/extent_file.go", "new_path": "pkg/sentry/fsimpl/ext/extent_file.go", "diff": "@@ -38,9 +38,10 @@ var _ io.ReaderAt = (*extentFile)(nil)\n// newExtentFile is the extent file constructor. It reads the entire extent\n// tree into memory.\n// TODO(b/134676337): Build extent tree on demand to reduce memory usage.\n-func newExtentFile(regFile regularFile) (*extentFile, error) {\n- file := &extentFile{regFile: regFile}\n+func newExtentFile(args inodeArgs) (*extentFile, error) {\n+ file := &extentFile{}\nfile.regFile.impl = file\n+ file.regFile.inode.init(args, &file.regFile)\nerr := file.buildExtTree()\nif err != nil {\nreturn nil, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/extent_test.go", "new_path": "pkg/sentry/fsimpl/ext/extent_test.go", "diff": "@@ -177,9 +177,8 @@ func extentTreeSetUp(t *testing.T, root *disklayout.ExtentNode) (*extentFile, []\nt.Helper()\nmockDisk := make([]byte, mockExtentBlkSize*10)\n- mockExtentFile := &extentFile{\n- regFile: regularFile{\n- inode: inode{\n+ mockExtentFile := &extentFile{}\n+ args := inodeArgs{\nfs: &filesystem{\ndev: bytes.NewReader(mockDisk),\n},\n@@ -189,9 +188,8 @@ func extentTreeSetUp(t *testing.T, root *disklayout.ExtentNode) (*extentFile, []\n},\n},\nblkSize: mockExtentBlkSize,\n- },\n- },\n}\n+ mockExtentFile.regFile.inode.init(args, &mockExtentFile.regFile)\nfileData := writeTree(&mockExtentFile.regFile.inode, mockDisk, node0, mockExtentBlkSize)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/inode.go", "new_path": "pkg/sentry/fsimpl/ext/inode.go", "diff": "@@ -118,7 +118,7 @@ func newInode(fs *filesystem, inodeNum uint32) (*inode, error) {\n}\n// Build the inode based on its type.\n- inode := inode{\n+ args := inodeArgs{\nfs: fs,\ninodeNum: inodeNum,\nblkSize: blkSize,\n@@ -127,19 +127,19 @@ func newInode(fs *filesystem, inodeNum uint32) (*inode, error) {\nswitch diskInode.Mode().FileType() {\ncase linux.ModeSymlink:\n- f, err := newSymlink(inode)\n+ f, err := newSymlink(args)\nif err != nil {\nreturn nil, err\n}\nreturn &f.inode, nil\ncase linux.ModeRegular:\n- f, err := newRegularFile(inode)\n+ f, err := newRegularFile(args)\nif err != nil {\nreturn nil, err\n}\nreturn &f.inode, nil\ncase linux.ModeDirectory:\n- f, err := newDirectory(inode, fs.sb.IncompatibleFeatures().DirentFileType)\n+ f, err := newDirectory(args, fs.sb.IncompatibleFeatures().DirentFileType)\nif err != nil {\nreturn nil, err\n}\n@@ -150,6 +150,21 @@ func newInode(fs *filesystem, inodeNum uint32) (*inode, error) {\n}\n}\n+type inodeArgs struct {\n+ fs *filesystem\n+ inodeNum uint32\n+ blkSize uint64\n+ diskInode disklayout.Inode\n+}\n+\n+func (in *inode) init(args inodeArgs, impl interface{}) 
{\n+ in.fs = args.fs\n+ in.inodeNum = args.inodeNum\n+ in.blkSize = args.blkSize\n+ in.diskInode = args.diskInode\n+ in.impl = impl\n+}\n+\n// open creates and returns a file description for the dentry passed in.\nfunc (in *inode) open(rp *vfs.ResolvingPath, vfsd *vfs.Dentry, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {\nats := vfs.AccessTypesForOpenFlags(opts)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/regular_file.go", "new_path": "pkg/sentry/fsimpl/ext/regular_file.go", "diff": "@@ -43,28 +43,19 @@ type regularFile struct {\n// newRegularFile is the regularFile constructor. It figures out what kind of\n// file this is and initializes the fileReader.\n-func newRegularFile(inode inode) (*regularFile, error) {\n- regFile := regularFile{\n- inode: inode,\n- }\n-\n- inodeFlags := inode.diskInode.Flags()\n-\n- if inodeFlags.Extents {\n- file, err := newExtentFile(regFile)\n+func newRegularFile(args inodeArgs) (*regularFile, error) {\n+ if args.diskInode.Flags().Extents {\n+ file, err := newExtentFile(args)\nif err != nil {\nreturn nil, err\n}\n-\n- file.regFile.inode.impl = &file.regFile\nreturn &file.regFile, nil\n}\n- file, err := newBlockMapFile(regFile)\n+ file, err := newBlockMapFile(args)\nif err != nil {\nreturn nil, err\n}\n- file.regFile.inode.impl = &file.regFile\nreturn &file.regFile, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/ext/symlink.go", "new_path": "pkg/sentry/fsimpl/ext/symlink.go", "diff": "@@ -30,18 +30,17 @@ type symlink struct {\n// newSymlink is the symlink constructor. It reads out the symlink target from\n// the inode (however it might have been stored).\n-func newSymlink(inode inode) (*symlink, error) {\n- var file *symlink\n+func newSymlink(args inodeArgs) (*symlink, error) {\nvar link []byte\n// If the symlink target is lesser than 60 bytes, its stores in inode.Data().\n// Otherwise either extents or block maps will be used to store the link.\n- size := inode.diskInode.Size()\n+ size := args.diskInode.Size()\nif size < 60 {\n- link = inode.diskInode.Data()[:size]\n+ link = args.diskInode.Data()[:size]\n} else {\n// Create a regular file out of this inode and read out the target.\n- regFile, err := newRegularFile(inode)\n+ regFile, err := newRegularFile(args)\nif err != nil {\nreturn nil, err\n}\n@@ -52,8 +51,8 @@ func newSymlink(inode inode) (*symlink, error) {\n}\n}\n- file = &symlink{inode: inode, target: string(link)}\n- file.inode.impl = file\n+ file := &symlink{target: string(link)}\n+ file.inode.init(args, file)\nreturn file, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Don't copy structs with sync.Mutex during initialization During initialization the inode struct was copied around, but it isn't good practice to copy it since it contains a ref count and a sync.Mutex. Updates #1480 PiperOrigin-RevId: 315983788
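For context, a minimal illustration (independent of the ext code) of why copying a struct that embeds a sync.Mutex is a problem, and the init-through-a-pointer pattern the change switches to. The type and field names here are illustrative only.

```go
package main

import (
	"fmt"
	"sync"
)

type inode struct {
	mu   sync.Mutex
	refs int64
}

// initInode fills in an existing inode in place; callers embed the inode and
// pass a pointer, so the mutex and ref count are never duplicated.
func initInode(in *inode, refs int64) {
	in.refs = refs
}

func main() {
	var parent struct{ in inode }
	initInode(&parent.in, 1)

	// By contrast, `c := parent.in` would duplicate both the mutex state and
	// the ref count; go vet's copylocks check flags exactly that kind of copy.
	parent.in.mu.Lock()
	parent.in.refs++
	parent.in.mu.Unlock()
	fmt.Println(parent.in.refs) // 2
}
```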
260,004
11.06.2020 16:08:06
25,200
4c0a8bdaf5e21ac85a4275e9008e5cd4294f45f3
Do not use tentative addresses for routes Tentative addresses should not be used when finding a route. This change fixes a bug where a tentative address may have been used. Test: stack_test.TestDADResolve
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -421,28 +421,52 @@ func TestDADResolve(t *testing.T) {\nt.Fatalf(\"CreateNIC(%d, _) = %s\", nicID, err)\n}\n+ // We add a default route so the call to FindRoute below will succeed\n+ // once we have an assigned address.\n+ s.SetRouteTable([]tcpip.Route{{\n+ Destination: header.IPv6EmptySubnet,\n+ Gateway: addr3,\n+ NIC: nicID,\n+ }})\n+\nif err := s.AddAddress(nicID, header.IPv6ProtocolNumber, addr1); err != nil {\nt.Fatalf(\"AddAddress(%d, %d, %s) = %s\", nicID, header.IPv6ProtocolNumber, addr1, err)\n}\n// Address should not be considered bound to the NIC yet (DAD ongoing).\n- addr, err := s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber)\n- if err != nil {\n- t.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = (_, %v), want = (_, nil)\", nicID, header.IPv6ProtocolNumber, err)\n- }\n- if want := (tcpip.AddressWithPrefix{}); addr != want {\n+ if addr, err := s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber); err != nil {\n+ t.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = (_, %s), want = (_, nil)\", nicID, header.IPv6ProtocolNumber, err)\n+ } else if want := (tcpip.AddressWithPrefix{}); addr != want {\nt.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = (%s, nil), want = (%s, nil)\", nicID, header.IPv6ProtocolNumber, addr, want)\n}\n// Make sure the address does not resolve before the resolution time has\n// passed.\ntime.Sleep(test.expectedRetransmitTimer*time.Duration(test.dupAddrDetectTransmits) - defaultAsyncEventTimeout)\n- addr, err = s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber)\n- if err != nil {\n- t.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = (_, %v), want = (_, nil)\", nicID, header.IPv6ProtocolNumber, err)\n+ if addr, err := s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber); err != nil {\n+ t.Errorf(\"got stack.GetMainNICAddress(%d, %d) = (_, %s), want = (_, nil)\", nicID, header.IPv6ProtocolNumber, err)\n+ } else if want := (tcpip.AddressWithPrefix{}); addr != want {\n+ t.Errorf(\"got stack.GetMainNICAddress(%d, %d) = (%s, nil), want = (%s, nil)\", nicID, header.IPv6ProtocolNumber, addr, want)\n}\n- if want := (tcpip.AddressWithPrefix{}); addr != want {\n- t.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = (%s, nil), want = (%s, nil)\", nicID, header.IPv6ProtocolNumber, addr, want)\n+ // Should not get a route even if we specify the local address as the\n+ // tentative address.\n+ {\n+ r, err := s.FindRoute(nicID, \"\", addr2, header.IPv6ProtocolNumber, false)\n+ if err != tcpip.ErrNoRoute {\n+ t.Errorf(\"got FindRoute(%d, '', %s, %d, false) = (%+v, %v), want = (_, %s)\", nicID, addr2, header.IPv6ProtocolNumber, r, err, tcpip.ErrNoRoute)\n+ }\n+ r.Release()\n+ }\n+ {\n+ r, err := s.FindRoute(nicID, addr1, addr2, header.IPv6ProtocolNumber, false)\n+ if err != tcpip.ErrNoRoute {\n+ t.Errorf(\"got FindRoute(%d, %s, %s, %d, false) = (%+v, %v), want = (_, %s)\", nicID, addr1, addr2, header.IPv6ProtocolNumber, r, err, tcpip.ErrNoRoute)\n+ }\n+ r.Release()\n+ }\n+\n+ if t.Failed() {\n+ t.FailNow()\n}\n// Wait for DAD to resolve.\n@@ -454,12 +478,33 @@ func TestDADResolve(t *testing.T) {\nt.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n}\n}\n- addr, err = s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber)\n+ if addr, err := s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber); err != nil {\n+ t.Errorf(\"got stack.GetMainNICAddress(%d, %d) = (_, %s), want = (_, nil)\", nicID, header.IPv6ProtocolNumber, err)\n+ } else if 
addr.Address != addr1 {\n+ t.Errorf(\"got stack.GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, header.IPv6ProtocolNumber, addr, addr1)\n+ }\n+ // Should get a route using the address now that it is resolved.\n+ {\n+ r, err := s.FindRoute(nicID, \"\", addr2, header.IPv6ProtocolNumber, false)\nif err != nil {\n- t.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = (_, %v), want = (_, nil)\", nicID, header.IPv6ProtocolNumber, err)\n+ t.Errorf(\"got FindRoute(%d, '', %s, %d, false): %s\", nicID, addr2, header.IPv6ProtocolNumber, err)\n+ } else if r.LocalAddress != addr1 {\n+ t.Errorf(\"got r.LocalAddress = %s, want = %s\", r.LocalAddress, addr1)\n}\n- if addr.Address != addr1 {\n- t.Fatalf(\"got stack.GetMainNICAddress(%d, %d) = %s, want = %s\", nicID, header.IPv6ProtocolNumber, addr, addr1)\n+ r.Release()\n+ }\n+ {\n+ r, err := s.FindRoute(nicID, addr1, addr2, header.IPv6ProtocolNumber, false)\n+ if err != nil {\n+ t.Errorf(\"got FindRoute(%d, %s, %s, %d, false): %s\", nicID, addr1, addr2, header.IPv6ProtocolNumber, err)\n+ } else if r.LocalAddress != addr1 {\n+ t.Errorf(\"got r.LocalAddress = %s, want = %s\", r.LocalAddress, addr1)\n+ }\n+ r.Release()\n+ }\n+\n+ if t.Failed() {\n+ t.FailNow()\n}\n// Should not have sent any more NS messages.\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/nic.go", "new_path": "pkg/tcpip/stack/nic.go", "diff": "@@ -610,20 +610,16 @@ func (n *NIC) getRefOrCreateTemp(protocol tcpip.NetworkProtocolNumber, address t\nif ref, ok := n.mu.endpoints[NetworkEndpointID{address}]; ok {\n// An endpoint with this id exists, check if it can be used and return it.\n- switch ref.getKind() {\n- case permanentExpired:\n- if !spoofingOrPromiscuous {\n+ if !ref.isAssignedRLocked(spoofingOrPromiscuous) {\nn.mu.RUnlock()\nreturn nil\n}\n- fallthrough\n- case temporary, permanent:\n+\nif ref.tryIncRef() {\nn.mu.RUnlock()\nreturn ref\n}\n}\n- }\n// A usable reference was not found, create a temporary one if requested by\n// the caller or if the address is found in the NIC's subnets.\n@@ -689,7 +685,6 @@ func (n *NIC) getRefOrCreateTempLocked(protocol tcpip.NetworkProtocolNumber, add\nPrefixLen: netProto.DefaultPrefixLen(),\n},\n}, peb, temporary, static, false)\n-\nreturn ref\n}\n@@ -1660,8 +1655,8 @@ func (r *referencedNetworkEndpoint) setKind(kind networkEndpointKind) {\n}\n// isValidForOutgoing returns true if the endpoint can be used to send out a\n-// packet. It requires the endpoint to not be marked expired (i.e., its address\n-// has been removed), or the NIC to be in spoofing mode.\n+// packet. It requires the endpoint to not be marked expired (i.e., its address)\n+// has been removed) unless the NIC is in spoofing mode, or temporary.\nfunc (r *referencedNetworkEndpoint) isValidForOutgoing() bool {\nr.nic.mu.RLock()\ndefer r.nic.mu.RUnlock()\n@@ -1669,13 +1664,28 @@ func (r *referencedNetworkEndpoint) isValidForOutgoing() bool {\nreturn r.isValidForOutgoingRLocked()\n}\n-// isValidForOutgoingRLocked returns true if the endpoint can be used to send\n-// out a packet. 
It requires the endpoint to not be marked expired (i.e., its\n-// address has been removed), or the NIC to be in spoofing mode.\n-//\n-// r's NIC must be read locked.\n+// isValidForOutgoingRLocked is the same as isValidForOutgoing but requires\n+// r.nic.mu to be read locked.\nfunc (r *referencedNetworkEndpoint) isValidForOutgoingRLocked() bool {\n- return r.nic.mu.enabled && (r.getKind() != permanentExpired || r.nic.mu.spoofing)\n+ if !r.nic.mu.enabled {\n+ return false\n+ }\n+\n+ return r.isAssignedRLocked(r.nic.mu.spoofing)\n+}\n+\n+// isAssignedRLocked returns true if r is considered to be assigned to the NIC.\n+//\n+// r.nic.mu must be read locked.\n+func (r *referencedNetworkEndpoint) isAssignedRLocked(spoofingOrPromiscuous bool) bool {\n+ switch r.getKind() {\n+ case permanentTentative:\n+ return false\n+ case permanentExpired:\n+ return spoofingOrPromiscuous\n+ default:\n+ return true\n+ }\n}\n// expireLocked decrements the reference count and marks the permanent endpoint\n" } ]
Go
Apache License 2.0
google/gvisor
Do not use tentative addresses for routes Tentative addresses should not be used when finding a route. This change fixes a bug where a tentative address may have been used. Test: stack_test.TestDADResolve PiperOrigin-RevId: 315997624
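To make the rule in this change concrete, here is a minimal, self-contained Go sketch of the idea: an address that is still tentative (DAD in progress) is never treated as assigned, and an expired one only counts when spoofing is enabled. The types and names below are invented for illustration and are not the netstack API.

package main

import "fmt"

type addressKind int

const (
	permanent addressKind = iota
	permanentTentative
	permanentExpired
	temporary
)

type endpoint struct {
	addr string
	kind addressKind
}

// isAssigned mirrors the check the commit adds: tentative addresses are never
// considered assigned; expired ones only count when spoofing is allowed.
func (e endpoint) isAssigned(spoofing bool) bool {
	switch e.kind {
	case permanentTentative:
		return false
	case permanentExpired:
		return spoofing
	default:
		return true
	}
}

// pickSource returns the first usable source address, or an error playing the
// role of tcpip.ErrNoRoute when every candidate is still tentative.
func pickSource(eps []endpoint, spoofing bool) (string, error) {
	for _, e := range eps {
		if e.isAssigned(spoofing) {
			return e.addr, nil
		}
	}
	return "", fmt.Errorf("no route: no assigned address")
}

func main() {
	eps := []endpoint{{addr: "fe80::1", kind: permanentTentative}}
	if _, err := pickSource(eps, false); err != nil {
		fmt.Println(err) // DAD has not resolved yet, so no usable source address.
	}
}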
259,992
11.06.2020 17:32:42
25,200
fbe41987c999729ceec6851ff64acc9013a0c602
Enable nogo again
[ { "change_type": "MODIFY", "old_path": "tools/defs.bzl", "new_path": "tools/defs.bzl", "diff": "@@ -96,8 +96,7 @@ def go_imports(name, src, out):\ncmd = (\"$(location @org_golang_x_tools//cmd/goimports:goimports) $(SRCS) > $@\"),\n)\n-# TODO(b/158696872): Enable nogo by default.\n-def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = False, marshal_debug = False, nogo = False, **kwargs):\n+def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = False, marshal_debug = False, nogo = True, **kwargs):\n\"\"\"Wraps the standard go_library and does stateification and marshalling.\nThe recommended way is to use this rule with mostly identical configuration as the native\n" } ]
Go
Apache License 2.0
google/gvisor
Enable nogo again PiperOrigin-RevId: 316011323
259,854
11.06.2020 18:02:58
25,200
dc4e0157ef09632a25575810a70846ea81c4dd6b
Add test for reordering. Tests the effect of reordering on retransmission and window size. The test covers the expected behavior of both Linux and netstack; however, netstack does not behave as expected. Further, the current expected behavior of netstack is not ideal and should be adjusted in the future.
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/proto/posix_server.proto", "new_path": "test/packetimpact/proto/posix_server.proto", "diff": "@@ -150,7 +150,7 @@ message SendRequest {\nmessage SendResponse {\nint32 ret = 1;\n- int32 errno_ = 2;\n+ int32 errno_ = 2; // \"errno\" may fail to compile in c++.\n}\nmessage SendToRequest {\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/packetimpact_test.go", "new_path": "test/packetimpact/runner/packetimpact_test.go", "diff": "@@ -268,6 +268,7 @@ func TestOne(t *testing.T) {\n\"--remote_ipv6\", remoteIPv6.String(),\n\"--remote_mac\", remoteMAC.String(),\n\"--device\", testNetDev,\n+ \"--dut_type\", *dutPlatform,\n)\n_, err = testbench.Exec(dockerutil.RunOpts{}, testArgs...)\nif !*expectFailure && err != nil {\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/testbench/testbench.go", "new_path": "test/packetimpact/testbench/testbench.go", "diff": "@@ -25,6 +25,8 @@ import (\n)\nvar (\n+ // DUTType is the type of device under test.\n+ DUTType = \"\"\n// Device is the local device on the test network.\nDevice = \"\"\n// LocalIPv4 is the local IPv4 address on the test network.\n@@ -63,6 +65,7 @@ func RegisterFlags(fs *flag.FlagSet) {\nfs.StringVar(&RemoteIPv6, \"remote_ipv6\", RemoteIPv6, \"remote IPv6 address for test packets\")\nfs.StringVar(&RemoteMAC, \"remote_mac\", RemoteMAC, \"remote mac address for test packets\")\nfs.StringVar(&Device, \"device\", Device, \"local device for test packets\")\n+ fs.StringVar(&DUTType, \"dut_type\", DUTType, \"type of device under test\")\n}\n// genPseudoFlags populates flag-like global config based on real flags.\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/BUILD", "new_path": "test/packetimpact/tests/BUILD", "diff": "@@ -52,6 +52,19 @@ packetimpact_go_test(\n],\n)\n+packetimpact_go_test(\n+ name = \"tcp_reordering\",\n+ srcs = [\"tcp_reordering_test.go\"],\n+ # TODO(b/139368047): Fix netstack then remove the line below.\n+ expect_netstack_failure = True,\n+ deps = [\n+ \"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/seqnum\",\n+ \"//test/packetimpact/testbench\",\n+ \"@org_golang_x_sys//unix:go_default_library\",\n+ ],\n+)\n+\npacketimpact_go_test(\nname = \"tcp_window_shrink\",\nsrcs = [\"tcp_window_shrink_test.go\"],\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/packetimpact/tests/tcp_reordering_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package reordering_test\n+\n+import (\n+ \"flag\"\n+ \"testing\"\n+ \"time\"\n+\n+ \"golang.org/x/sys/unix\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/seqnum\"\n+ tb \"gvisor.dev/gvisor/test/packetimpact/testbench\"\n+)\n+\n+func init() {\n+ tb.RegisterFlags(flag.CommandLine)\n+}\n+\n+func TestReorderingWindow(t *testing.T) {\n+ dut := tb.NewDUT(t)\n+ defer dut.TearDown()\n+ listenFd, remotePort := dut.CreateListener(unix.SOCK_STREAM, unix.IPPROTO_TCP, 1)\n+ defer 
dut.Close(listenFd)\n+ conn := tb.NewTCPIPv4(t, tb.TCP{DstPort: &remotePort}, tb.TCP{SrcPort: &remotePort})\n+ defer conn.Close()\n+\n+ // Enable SACK.\n+ opts := make([]byte, 40)\n+ optsOff := 0\n+ optsOff += header.EncodeNOP(opts[optsOff:])\n+ optsOff += header.EncodeNOP(opts[optsOff:])\n+ optsOff += header.EncodeSACKPermittedOption(opts[optsOff:])\n+\n+ // Ethernet guarantees that the MTU is at least 1500 bytes.\n+ const minMTU = 1500\n+ const mss = minMTU - header.IPv4MinimumSize - header.TCPMinimumSize\n+ optsOff += header.EncodeMSSOption(mss, opts[optsOff:])\n+\n+ conn.ConnectWithOptions(opts[:optsOff])\n+\n+ acceptFd, _ := dut.Accept(listenFd)\n+ defer dut.Close(acceptFd)\n+\n+ if tb.DUTType == \"linux\" {\n+ // Linux has changed its handling of reordering, force the old behavior.\n+ dut.SetSockOpt(acceptFd, unix.IPPROTO_TCP, unix.TCP_CONGESTION, []byte(\"reno\"))\n+ }\n+\n+ pls := dut.GetSockOptInt(acceptFd, unix.IPPROTO_TCP, unix.TCP_MAXSEG)\n+ if tb.DUTType == \"netstack\" {\n+ // netstack does not impliment TCP_MAXSEG correctly. Fake it\n+ // here. Netstack uses the max SACK size which is 32. The MSS\n+ // option is 8 bytes, making the total 36 bytes.\n+ pls = mss - 36\n+ }\n+\n+ payload := make([]byte, pls)\n+\n+ seqNum1 := *conn.RemoteSeqNum()\n+ const numPkts = 10\n+ // Send some packets, checking that we receive each.\n+ for i, sn := 0, seqNum1; i < numPkts; i++ {\n+ dut.Send(acceptFd, payload, 0)\n+\n+ gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)\n+ sn.UpdateForward(seqnum.Size(len(payload)))\n+ if err != nil {\n+ t.Errorf(\"Expect #%d: %s\", i+1, err)\n+ continue\n+ }\n+ if gotOne == nil {\n+ t.Errorf(\"#%d: expected a packet within a second but got none\", i+1)\n+ }\n+ }\n+\n+ seqNum2 := *conn.RemoteSeqNum()\n+\n+ // SACK packets #2-4.\n+ sackBlock := make([]byte, 40)\n+ sbOff := 0\n+ sbOff += header.EncodeNOP(sackBlock[sbOff:])\n+ sbOff += header.EncodeNOP(sackBlock[sbOff:])\n+ sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{\n+ seqNum1.Add(seqnum.Size(len(payload))),\n+ seqNum1.Add(seqnum.Size(4 * len(payload))),\n+ }}, sackBlock[sbOff:])\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})\n+\n+ // ACK first packet.\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum1) + uint32(len(payload)))})\n+\n+ // Check for retransmit.\n+ gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(seqNum1))}, time.Second)\n+ if err != nil {\n+ t.Error(\"Expect for retransmit:\", err)\n+ }\n+ if gotOne == nil {\n+ t.Error(\"expected a retransmitted packet within a second but got none\")\n+ }\n+\n+ // ACK all send packets with a DSACK block for packet #1. 
This tells\n+ // the other end that we got both the original and retransmit for\n+ // packet #1.\n+ dsackBlock := make([]byte, 40)\n+ dsbOff := 0\n+ dsbOff += header.EncodeNOP(dsackBlock[dsbOff:])\n+ dsbOff += header.EncodeNOP(dsackBlock[dsbOff:])\n+ dsbOff += header.EncodeSACKBlocks([]header.SACKBlock{{\n+ seqNum1.Add(seqnum.Size(len(payload))),\n+ seqNum1.Add(seqnum.Size(4 * len(payload))),\n+ }}, dsackBlock[dsbOff:])\n+\n+ conn.Send(tb.TCP{Flags: tb.Uint8(header.TCPFlagAck), AckNum: tb.Uint32(uint32(seqNum2)), Options: dsackBlock[:dsbOff]})\n+\n+ // Send half of the original window of packets, checking that we\n+ // received each.\n+ for i, sn := 0, seqNum2; i < numPkts/2; i++ {\n+ dut.Send(acceptFd, payload, 0)\n+\n+ gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)\n+ sn.UpdateForward(seqnum.Size(len(payload)))\n+ if err != nil {\n+ t.Errorf(\"Expect #%d: %s\", i+1, err)\n+ continue\n+ }\n+ if gotOne == nil {\n+ t.Errorf(\"#%d: expected a packet within a second but got none\", i+1)\n+ }\n+ }\n+\n+ if tb.DUTType == \"netstack\" {\n+ // The window should now be halved, so we should receive any\n+ // more, even if we send them.\n+ dut.Send(acceptFd, payload, 0)\n+ if got, err := conn.Expect(tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {\n+ t.Fatalf(\"expected no packets within 100 millisecond, but got one: %s\", got)\n+ }\n+ return\n+ }\n+\n+ // Linux reduces the window by three. Check that we can receive the rest.\n+ for i, sn := 0, seqNum2.Add(seqnum.Size(numPkts/2*len(payload))); i < 2; i++ {\n+ dut.Send(acceptFd, payload, 0)\n+\n+ gotOne, err := conn.Expect(tb.TCP{SeqNum: tb.Uint32(uint32(sn))}, time.Second)\n+ sn.UpdateForward(seqnum.Size(len(payload)))\n+ if err != nil {\n+ t.Errorf(\"Expect #%d: %s\", i+1, err)\n+ continue\n+ }\n+ if gotOne == nil {\n+ t.Errorf(\"#%d: expected a packet within a second but got none\", i+1)\n+ }\n+ }\n+\n+ // The window should now be full.\n+ dut.Send(acceptFd, payload, 0)\n+ if got, err := conn.Expect(tb.TCP{}, 100*time.Millisecond); got != nil || err == nil {\n+ t.Fatalf(\"expected no packets within 100 millisecond, but got one: %s\", got)\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Add test for reordering. Tests the effect of reordering on retransmission and window size. Test covers the expected behavior of both Linux and netstack, however, netstack does not behave as expected. Further, the current expected behavior of netstack is not ideal and should be adjusted in the future. PiperOrigin-RevId: 316015184
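The test above builds raw TCP SACK options by hand. As a small standalone illustration, this Go snippet uses the same gVisor header helpers that appear in the diff to encode a single SACK block; the sequence numbers are arbitrary example values, not taken from the test.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
	"gvisor.dev/gvisor/pkg/tcpip/seqnum"
)

func main() {
	opts := make([]byte, 40)
	off := 0
	// Two NOPs pad the options to a 4-byte boundary, as in the test.
	off += header.EncodeNOP(opts[off:])
	off += header.EncodeNOP(opts[off:])
	// Acknowledge the out-of-order range [1001, 4001) with a SACK block.
	off += header.EncodeSACKBlocks([]header.SACKBlock{{
		Start: seqnum.Value(1001),
		End:   seqnum.Value(4001),
	}}, opts[off:])
	fmt.Printf("encoded %d option bytes: % x\n", off, opts[:off])
}

The DSACK case in the test reuses the same encoder, only with a range that has already been cumulatively acknowledged.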
259,858
11.06.2020 19:03:46
25,200
5a894e35a090232085fbb20c71d1787c266bd995
Remove generated logs when test succeeds.
[ { "change_type": "MODIFY", "old_path": "test/runner/runner.go", "new_path": "test/runner/runner.go", "diff": "@@ -160,8 +160,9 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\nargs = append(args, \"-fsgofer-host-uds\")\n}\n- if outDir, ok := syscall.Getenv(\"TEST_UNDECLARED_OUTPUTS_DIR\"); ok {\n- tdir := filepath.Join(outDir, strings.Replace(name, \"/\", \"_\", -1))\n+ undeclaredOutputsDir, ok := syscall.Getenv(\"TEST_UNDECLARED_OUTPUTS_DIR\")\n+ if ok {\n+ tdir := filepath.Join(undeclaredOutputsDir, strings.Replace(name, \"/\", \"_\", -1))\nif err := os.MkdirAll(tdir, 0755); err != nil {\nreturn fmt.Errorf(\"could not create test dir: %v\", err)\n}\n@@ -201,7 +202,9 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\ncmd.Stdout = os.Stdout\ncmd.Stderr = os.Stderr\nsig := make(chan os.Signal, 1)\n+ defer close(sig)\nsignal.Notify(sig, syscall.SIGTERM)\n+ defer signal.Stop(sig)\ngo func() {\ns, ok := <-sig\nif !ok {\n@@ -237,9 +240,11 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {\n}()\nerr = cmd.Run()\n-\n- signal.Stop(sig)\n- close(sig)\n+ if err == nil {\n+ // If the test passed, then we erase the log directory. This speeds up\n+ // uploading logs in continuous integration & saves on disk space.\n+ os.RemoveAll(undeclaredOutputsDir)\n+ }\nreturn err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove generated logs when test succeeds. PiperOrigin-RevId: 316022884
259,884
11.06.2020 19:29:34
25,200
8ea99d58ffd708aa7a26be58d89cb817d8eceec6
Set the HOME environment variable for sub-containers. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/test/criutil/criutil.go", "new_path": "pkg/test/criutil/criutil.go", "diff": "@@ -113,6 +113,17 @@ func (cc *Crictl) Exec(contID string, args ...string) (string, error) {\nreturn output, nil\n}\n+// Logs retrieves the container logs. It corresponds to `crictl logs`.\n+func (cc *Crictl) Logs(contID string, args ...string) (string, error) {\n+ a := []string{\"logs\", contID}\n+ a = append(a, args...)\n+ output, err := cc.run(a...)\n+ if err != nil {\n+ return \"\", fmt.Errorf(\"logs failed: %v\", err)\n+ }\n+ return output, nil\n+}\n+\n// Rm removes a container. It corresponds to `crictl rm`.\nfunc (cc *Crictl) Rm(contID string) error {\n_, err := cc.run(\"rm\", contID)\n" }, { "change_type": "MODIFY", "old_path": "pkg/test/testutil/testutil.go", "new_path": "pkg/test/testutil/testutil.go", "diff": "@@ -251,7 +251,10 @@ func RandomID(prefix string) string {\nif _, err := rand.Read(b); err != nil {\npanic(\"rand.Read failed: \" + err.Error())\n}\n- return fmt.Sprintf(\"%s-%s\", prefix, base32.StdEncoding.EncodeToString(b))\n+ if prefix != \"\" {\n+ prefix = prefix + \"-\"\n+ }\n+ return fmt.Sprintf(\"%s%s\", prefix, base32.StdEncoding.EncodeToString(b))\n}\n// RandomContainerID generates a random container id for each test.\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -754,6 +754,21 @@ func (l *Loader) startContainer(spec *specs.Spec, conf *Config, cid string, file\nreturn err\n}\n+ // Add the HOME enviroment variable if it is not already set.\n+ var envv []string\n+ if kernel.VFS2Enabled {\n+ envv, err = user.MaybeAddExecUserHomeVFS2(ctx, procArgs.MountNamespaceVFS2,\n+ procArgs.Credentials.RealKUID, procArgs.Envv)\n+\n+ } else {\n+ envv, err = user.MaybeAddExecUserHome(ctx, procArgs.MountNamespace,\n+ procArgs.Credentials.RealKUID, procArgs.Envv)\n+ }\n+ if err != nil {\n+ return err\n+ }\n+ procArgs.Envv = envv\n+\n// Create and start the new process.\ntg, _, err := l.k.CreateProcess(procArgs)\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "runsc/container/multi_container_test.go", "new_path": "runsc/container/multi_container_test.go", "diff": "@@ -1698,3 +1698,83 @@ func TestMultiContainerRunNonRoot(t *testing.T) {\nt.Fatalf(\"child container failed, waitStatus: %v\", ws)\n}\n}\n+\n+// TestMultiContainerHomeEnvDir tests that the HOME environment variable is set\n+// for root containers, sub-containers, and execed processes.\n+func TestMultiContainerHomeEnvDir(t *testing.T) {\n+ // TODO(gvisor.dev/issue/1487): VFSv2 configs failing.\n+ // NOTE: Don't use overlay since we need changes to persist to the temp dir\n+ // outside the sandbox.\n+ for testName, conf := range configs(t, noOverlay...) 
{\n+ t.Run(testName, func(t *testing.T) {\n+\n+ rootDir, cleanup, err := testutil.SetupRootDir()\n+ if err != nil {\n+ t.Fatalf(\"error creating root dir: %v\", err)\n+ }\n+ defer cleanup()\n+ conf.RootDir = rootDir\n+\n+ // Create temp files we can write the value of $HOME to.\n+ homeDirs := map[string]*os.File{}\n+ for _, name := range []string{\"root\", \"sub\", \"exec\"} {\n+ homeFile, err := ioutil.TempFile(testutil.TmpDir(), name)\n+ if err != nil {\n+ t.Fatalf(\"creating temp file: %v\", err)\n+ }\n+ homeDirs[name] = homeFile\n+ }\n+\n+ // We will sleep in the root container in order to ensure that\n+ // the root container doesn't terminate before sub containers can be\n+ // created.\n+ rootCmd := []string{\"/bin/sh\", \"-c\", fmt.Sprintf(\"printf \\\"$HOME\\\" > %s; sleep 1000\", homeDirs[\"root\"].Name())}\n+ subCmd := []string{\"/bin/sh\", \"-c\", fmt.Sprintf(\"printf \\\"$HOME\\\" > %s\", homeDirs[\"sub\"].Name())}\n+ execCmd := []string{\"/bin/sh\", \"-c\", fmt.Sprintf(\"printf \\\"$HOME\\\" > %s\", homeDirs[\"exec\"].Name())}\n+\n+ // Setup the containers, a root container and sub container.\n+ specConfig, ids := createSpecs(rootCmd, subCmd)\n+ containers, cleanup, err := startContainers(conf, specConfig, ids)\n+ if err != nil {\n+ t.Fatalf(\"error starting containers: %v\", err)\n+ }\n+ defer cleanup()\n+\n+ // Exec into the root container synchronously.\n+ args := &control.ExecArgs{Argv: execCmd}\n+ if _, err := containers[0].executeSync(args); err != nil {\n+ t.Errorf(\"error executing %+v: %v\", args, err)\n+ }\n+\n+ // Wait for the subcontainer to finish.\n+ _, err = containers[1].Wait()\n+ if err != nil {\n+ t.Errorf(\"wait on child container: %v\", err)\n+ }\n+\n+ // Wait for the root container to run.\n+ expectedPL := []*control.Process{\n+ newProcessBuilder().Cmd(\"sh\").Process(),\n+ newProcessBuilder().Cmd(\"sleep\").Process(),\n+ }\n+ if err := waitForProcessList(containers[0], expectedPL); err != nil {\n+ t.Errorf(\"failed to wait for sleep to start: %v\", err)\n+ }\n+\n+ // Check the written files.\n+ for name, tmpFile := range homeDirs {\n+ dirBytes, err := ioutil.ReadAll(tmpFile)\n+ if err != nil {\n+ t.Fatalf(\"reading %s temp file: %v\", name, err)\n+ }\n+ got := string(dirBytes)\n+\n+ want := \"/\"\n+ if got != want {\n+ t.Errorf(\"%s $HOME incorrect: got: %q, want: %q\", name, got, want)\n+ }\n+ }\n+\n+ })\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "test/root/crictl_test.go", "new_path": "test/root/crictl_test.go", "diff": "@@ -39,6 +39,29 @@ import (\n// Tests for crictl have to be run as root (rather than in a user namespace)\n// because crictl creates named network namespaces in /var/run/netns/.\n+// Sandbox returns a JSON config for a simple sandbox. 
Sandbox names must be\n+// unique so different names should be used when running tests on the same\n+// containerd instance.\n+func Sandbox(name string) string {\n+ // Sandbox is a default JSON config for a sandbox.\n+ s := map[string]interface{}{\n+ \"metadata\": map[string]string{\n+ \"name\": name,\n+ \"namespace\": \"default\",\n+ \"uid\": testutil.RandomID(\"\"),\n+ },\n+ \"linux\": map[string]string{},\n+ \"log_directory\": \"/tmp\",\n+ }\n+\n+ v, err := json.Marshal(s)\n+ if err != nil {\n+ // This shouldn't happen.\n+ panic(err)\n+ }\n+ return string(v)\n+}\n+\n// SimpleSpec returns a JSON config for a simple container that runs the\n// specified command in the specified image.\nfunc SimpleSpec(name, image string, cmd []string, extra map[string]interface{}) string {\n@@ -49,7 +72,9 @@ func SimpleSpec(name, image string, cmd []string, extra map[string]interface{})\n\"image\": map[string]string{\n\"image\": testutil.ImageByName(image),\n},\n- \"log_path\": fmt.Sprintf(\"%s.log\", name),\n+ // Log files are not deleted after root tests are run. Log to random\n+ // paths to ensure logs are fresh.\n+ \"log_path\": fmt.Sprintf(\"%s.log\", testutil.RandomID(name)),\n}\nif len(cmd) > 0 { // Omit if empty.\ns[\"command\"] = cmd\n@@ -65,20 +90,6 @@ func SimpleSpec(name, image string, cmd []string, extra map[string]interface{})\nreturn string(v)\n}\n-// Sandbox is a default JSON config for a sandbox.\n-var Sandbox = `{\n- \"metadata\": {\n- \"name\": \"default-sandbox\",\n- \"namespace\": \"default\",\n- \"attempt\": 1,\n- \"uid\": \"hdishd83djaidwnduwk28bcsb\"\n- },\n- \"linux\": {\n- },\n- \"log_directory\": \"/tmp\"\n-}\n-`\n-\n// Httpd is a JSON config for an httpd container.\nvar Httpd = SimpleSpec(\"httpd\", \"basic/httpd\", nil, nil)\n@@ -90,7 +101,7 @@ func TestCrictlSanity(t *testing.T) {\nt.Fatalf(\"failed to setup crictl: %v\", err)\n}\ndefer cleanup()\n- podID, contID, err := crictl.StartPodAndContainer(\"basic/httpd\", Sandbox, Httpd)\n+ podID, contID, err := crictl.StartPodAndContainer(\"basic/httpd\", Sandbox(\"default\"), Httpd)\nif err != nil {\nt.Fatalf(\"start failed: %v\", err)\n}\n@@ -142,7 +153,7 @@ func TestMountPaths(t *testing.T) {\nt.Fatalf(\"failed to setup crictl: %v\", err)\n}\ndefer cleanup()\n- podID, contID, err := crictl.StartPodAndContainer(\"basic/httpd\", Sandbox, HttpdMountPaths)\n+ podID, contID, err := crictl.StartPodAndContainer(\"basic/httpd\", Sandbox(\"default\"), HttpdMountPaths)\nif err != nil {\nt.Fatalf(\"start failed: %v\", err)\n}\n@@ -168,7 +179,7 @@ func TestMountOverSymlinks(t *testing.T) {\ndefer cleanup()\nspec := SimpleSpec(\"busybox\", \"basic/resolv\", []string{\"sleep\", \"1000\"}, nil)\n- podID, contID, err := crictl.StartPodAndContainer(\"basic/resolv\", Sandbox, spec)\n+ podID, contID, err := crictl.StartPodAndContainer(\"basic/resolv\", Sandbox(\"default\"), spec)\nif err != nil {\nt.Fatalf(\"start failed: %v\", err)\n}\n@@ -200,7 +211,7 @@ func TestMountOverSymlinks(t *testing.T) {\n}\n// TestHomeDir tests that the HOME environment variable is set for\n-// multi-containers.\n+// Pod containers.\nfunc TestHomeDir(t *testing.T) {\n// Setup containerd and crictl.\ncrictl, cleanup, err := setup(t)\n@@ -208,48 +219,52 @@ func TestHomeDir(t *testing.T) {\nt.Fatalf(\"failed to setup crictl: %v\", err)\n}\ndefer cleanup()\n- contSpec := SimpleSpec(\"root\", \"basic/busybox\", []string{\"sleep\", \"1000\"}, nil)\n- podID, contID, err := crictl.StartPodAndContainer(\"basic/busybox\", Sandbox, contSpec)\n+\n+ // Note that container ID 
returned here is a sub-container. All Pod\n+ // containers are sub-containers. The root container of the sandbox is the\n+ // pause container.\n+ t.Run(\"sub-container\", func(t *testing.T) {\n+ contSpec := SimpleSpec(\"subcontainer\", \"basic/busybox\", []string{\"sh\", \"-c\", \"echo $HOME\"}, nil)\n+ podID, contID, err := crictl.StartPodAndContainer(\"basic/busybox\", Sandbox(\"subcont-sandbox\"), contSpec)\nif err != nil {\nt.Fatalf(\"start failed: %v\", err)\n}\n- t.Run(\"root container\", func(t *testing.T) {\n- out, err := crictl.Exec(contID, \"sh\", \"-c\", \"echo $HOME\")\n+ out, err := crictl.Logs(contID)\nif err != nil {\n- t.Fatalf(\"exec failed: %v, out: %s\", err, out)\n+ t.Fatalf(\"failed retrieving container logs: %v, out: %s\", err, out)\n}\nif got, want := strings.TrimSpace(string(out)), \"/root\"; got != want {\nt.Fatalf(\"Home directory invalid. Got %q, Want : %q\", got, want)\n}\n+\n+ // Stop everything.\n+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {\n+ t.Fatalf(\"stop failed: %v\", err)\n+ }\n})\n- t.Run(\"sub-container\", func(t *testing.T) {\n- // Create a sub container in the same pod.\n- subContSpec := SimpleSpec(\"subcontainer\", \"basic/busybox\", []string{\"sleep\", \"1000\"}, nil)\n- subContID, err := crictl.StartContainer(podID, \"basic/busybox\", Sandbox, subContSpec)\n+ // Tests that HOME is set for the exec process.\n+ t.Run(\"exec\", func(t *testing.T) {\n+ contSpec := SimpleSpec(\"exec\", \"basic/busybox\", []string{\"sleep\", \"1000\"}, nil)\n+ podID, contID, err := crictl.StartPodAndContainer(\"basic/busybox\", Sandbox(\"exec-sandbox\"), contSpec)\nif err != nil {\nt.Fatalf(\"start failed: %v\", err)\n}\n- out, err := crictl.Exec(subContID, \"sh\", \"-c\", \"echo $HOME\")\n+ out, err := crictl.Exec(contID, \"sh\", \"-c\", \"echo $HOME\")\nif err != nil {\n- t.Fatalf(\"exec failed: %v, out: %s\", err, out)\n+ t.Fatalf(\"failed retrieving container logs: %v, out: %s\", err, out)\n}\nif got, want := strings.TrimSpace(string(out)), \"/root\"; got != want {\nt.Fatalf(\"Home directory invalid. Got %q, Want : %q\", got, want)\n}\n- if err := crictl.StopContainer(subContID); err != nil {\n- t.Fatalf(\"stop failed: %v\", err)\n- }\n- })\n-\n// Stop everything.\nif err := crictl.StopPodAndContainer(podID, contID); err != nil {\nt.Fatalf(\"stop failed: %v\", err)\n}\n-\n+ })\n}\n// containerdConfigTemplate is a .toml config for containerd. It contains a\n" } ]
Go
Apache License 2.0
google/gvisor
Set the HOME environment variable for sub-containers. Fixes #701 PiperOrigin-RevId: 316025635
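The core of this change is "add HOME to the environment only if it is not already set", now applied to sub-containers and exec'd processes as well as the root container. A rough, hypothetical Go sketch of that pattern; the helper name is made up here, the real logic lives in the sentry's user package.

package main

import (
	"fmt"
	"strings"
)

// maybeAddHome returns envv with a HOME entry appended, unless the caller
// already provided one. home would come from the user database lookup.
func maybeAddHome(envv []string, home string) []string {
	for _, kv := range envv {
		if strings.HasPrefix(kv, "HOME=") {
			return envv // Respect an explicitly configured HOME.
		}
	}
	return append(envv, "HOME="+home)
}

func main() {
	fmt.Println(maybeAddHome([]string{"PATH=/bin"}, "/root"))       // HOME added.
	fmt.Println(maybeAddHome([]string{"HOME=/home/user"}, "/root")) // Left alone.
}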
260,023
11.06.2020 19:45:06
25,200
61d6c059ac3434ddf0c206975a116a09d0838338
Replace use of %v in packetimpact tests
[ { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/ipv4_id_uniqueness_test.go", "new_path": "test/packetimpact/tests/ipv4_id_uniqueness_test.go", "diff": "@@ -37,7 +37,7 @@ func recvTCPSegment(conn *testbench.TCPIPv4, expect *testbench.TCP, expectPayloa\nreturn 0, fmt.Errorf(\"failed to receive TCP segment: %s\", err)\n}\nif len(layers) < 2 {\n- return 0, fmt.Errorf(\"got packet with layers: %v, expected to have at least 2 layers (link and network)\", layers)\n+ return 0, fmt.Errorf(\"got packet with layers: %s, expected to have at least 2 layers (link and network)\", layers)\n}\nipv4, ok := layers[1].(*testbench.IPv4)\nif !ok {\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_retransmits_test.go", "new_path": "test/packetimpact/tests/tcp_retransmits_test.go", "diff": "@@ -49,7 +49,7 @@ func TestRetransmits(t *testing.T) {\ndut.Send(acceptFd, sampleData, 0)\nif _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\n// Give a chance for the dut to estimate RTO with RTT from the DATA-ACK.\n// TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which\n@@ -62,13 +62,13 @@ func TestRetransmits(t *testing.T) {\ndut.Send(acceptFd, sampleData, 0)\nseq := testbench.Uint32(uint32(*conn.RemoteSeqNum()))\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: seq}, samplePayload, startRTO); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\n// Expect retransmits of the same segment.\nfor i := 0; i < 5; i++ {\nstart := time.Now()\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: seq}, samplePayload, 2*current); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s loop %d\", samplePayload, err, i)\n+ t.Fatalf(\"expected payload was not received: %s loop %d\", err, i)\n}\nif i == 0 {\nstartRTO = time.Now().Sub(first)\n@@ -77,7 +77,7 @@ func TestRetransmits(t *testing.T) {\n}\n// Check if the probes came at exponentially increasing intervals.\nif p := time.Since(start); p < current-startRTO {\n- t.Fatalf(\"retransmit came sooner interval %d probe %d\\n\", p, i)\n+ t.Fatalf(\"retransmit came sooner interval %d probe %d\", p, i)\n}\ncurrent *= 2\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_window_shrink_test.go", "new_path": "test/packetimpact/tests/tcp_window_shrink_test.go", "diff": "@@ -47,17 +47,17 @@ func TestWindowShrink(t *testing.T) {\ndut.Send(acceptFd, sampleData, 0)\nif _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\nconn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})\ndut.Send(acceptFd, sampleData, 0)\ndut.Send(acceptFd, sampleData, 0)\nif _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\nif _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\n// We close our receiving window 
here\nconn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})\n@@ -68,6 +68,6 @@ func TestWindowShrink(t *testing.T) {\n// the following lines.\nexpectedRemoteSeqNum := *conn.RemoteSeqNum() - 1\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: testbench.Uint32(uint32(expectedRemoteSeqNum))}, nil, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with sequence number %v: %s\", expectedRemoteSeqNum, err)\n+ t.Fatalf(\"expected a packet with sequence number %d: %s\", expectedRemoteSeqNum, err)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go", "new_path": "test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go", "diff": "@@ -50,11 +50,11 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {\n// Send and receive sample data to the dut.\ndut.Send(acceptFd, sampleData, 0)\nif _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\nconn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)\nif _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with sequence number %s\", err)\n+ t.Fatalf(\"expected packet was not received: %s\", err)\n}\n// Check for the dut to keep the connection alive as long as the zero window\n@@ -80,7 +80,7 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {\n// first retransmission time. The retransmission times is supposed to\n// exponentially increase.\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, 2*current); err != nil {\n- t.Fatalf(\"expected a probe with sequence number %v: loop %d\", probeSeq, i)\n+ t.Fatalf(\"expected a probe with sequence number %d: loop %d\", probeSeq, i)\n}\nif i == 0 {\nstartProbeDuration = time.Now().Sub(first)\n@@ -100,6 +100,6 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {\n// Expect the dut to recover and transmit data.\nif _, err := conn.ExpectData(&testbench.\nTCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_zero_window_probe_test.go", "new_path": "test/packetimpact/tests/tcp_zero_window_probe_test.go", "diff": "@@ -51,12 +51,12 @@ func TestZeroWindowProbe(t *testing.T) {\n// Send and receive sample data to the dut.\ndut.Send(acceptFd, sampleData, 0)\nif _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\nsendTime := time.Now().Sub(start)\nconn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)\nif _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with sequence number %s\", err)\n+ t.Fatalf(\"expected packet was not received: %s\", err)\n}\n// Test 1: Check for receive of a zero window probe, record the duration for\n@@ -73,7 +73,7 @@ func TestZeroWindowProbe(t *testing.T) {\n// Expect there are no zero-window probes sent until there 
is data to be sent out\n// from the dut.\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, 2*time.Second); err == nil {\n- t.Fatalf(\"unexpected a packet with sequence number %v: %s\", probeSeq, err)\n+ t.Fatalf(\"unexpected packet with sequence number %d: %s\", probeSeq, err)\n}\nstart = time.Now()\n@@ -81,13 +81,13 @@ func TestZeroWindowProbe(t *testing.T) {\ndut.Send(acceptFd, sampleData, 0)\n// Expect zero-window probe from the dut.\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with sequence number %v: %s\", probeSeq, err)\n+ t.Fatalf(\"expected a packet with sequence number %d: %s\", probeSeq, err)\n}\n// Expect the probe to be sent after some time. Compare against the previous\n// time recorded when the dut immediately sends out data on receiving the\n// send command.\nif startProbeDuration := time.Now().Sub(start); startProbeDuration <= sendTime {\n- t.Fatalf(\"expected the first probe to be sent out after retransmission interval, got %v want > %v\\n\", startProbeDuration, sendTime)\n+ t.Fatalf(\"expected the first probe to be sent out after retransmission interval, got %s want > %s\", startProbeDuration, sendTime)\n}\n// Test 2: Check if the dut recovers on advertizing non-zero receive window.\n@@ -97,7 +97,7 @@ func TestZeroWindowProbe(t *testing.T) {\nconn.Send(testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck)})\n// Expect the dut to recover and transmit data.\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\n// Test 3: Sanity check for dut's processing of a similar probe it sent.\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go", "new_path": "test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go", "diff": "@@ -50,11 +50,11 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {\n// Send and receive sample data to the dut.\ndut.Send(acceptFd, sampleData, 0)\nif _, err := conn.ExpectData(&testbench.TCP{}, samplePayload, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with payload %v: %s\", samplePayload, err)\n+ t.Fatalf(\"expected payload was not received: %s\", err)\n}\nconn.Send(testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)\nif _, err := conn.ExpectData(&testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with sequence number %s\", err)\n+ t.Fatalf(\"expected packet was not received: %s\", err)\n}\n// Test 1: Check for receive of a zero window probe, record the duration for\n@@ -70,7 +70,7 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {\ndut.Send(acceptFd, sampleData, 0)\n// Expect zero-window probe from the dut.\nif _, err := conn.ExpectData(&testbench.TCP{SeqNum: probeSeq}, nil, time.Second); err != nil {\n- t.Fatalf(\"expected a packet with sequence number %v: %s\", probeSeq, err)\n+ t.Fatalf(\"expected a packet with sequence number %d: %s\", probeSeq, err)\n}\n// Record the duration for first probe, the dut sends the zero window probe after\n// a retransmission time interval.\n" } ]
Go
Apache License 2.0
google/gvisor
Replace use of %v in packetimpact tests PiperOrigin-RevId: 316027588
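For context, a tiny Go example of the formatting-verb cleanup applied throughout these tests: %s for errors and durations, %d for integers, instead of the catch-all %v. The messages below are illustrative, not taken from the tests.

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	err := errors.New("connection reset")
	seq := uint32(42)
	rto := 200 * time.Millisecond

	// Before: %v works for anything, but hides what type the message expects.
	fmt.Printf("expected payload was not received: %v (seq %v, rto %v)\n", err, seq, rto)

	// After: %s and %d make the intent explicit and are checked by go vet's
	// printf analysis.
	fmt.Printf("expected payload was not received: %s (seq %d, rto %s)\n", err, seq, rto)
}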
259,853
12.06.2020 11:56:43
25,200
6ec9d60403fdf7a33072eaa023e62bfd56ed9f5c
vfs2: implement fcntl(fd, F_SETFL, flags)
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/fd_table.go", "new_path": "pkg/sentry/kernel/fd_table.go", "diff": "@@ -458,6 +458,29 @@ func (f *FDTable) SetFlags(fd int32, flags FDFlags) error {\nreturn nil\n}\n+// SetFlagsVFS2 sets the flags for the given file descriptor.\n+//\n+// True is returned iff flags were changed.\n+func (f *FDTable) SetFlagsVFS2(fd int32, flags FDFlags) error {\n+ if fd < 0 {\n+ // Don't accept negative FDs.\n+ return syscall.EBADF\n+ }\n+\n+ f.mu.Lock()\n+ defer f.mu.Unlock()\n+\n+ file, _, _ := f.getVFS2(fd)\n+ if file == nil {\n+ // No file found.\n+ return syscall.EBADF\n+ }\n+\n+ // Update the flags.\n+ f.setVFS2(fd, file, flags)\n+ return nil\n+}\n+\n// Get returns a reference to the file and the flags for the FD or nil if no\n// file is defined for the given fd.\n//\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -935,10 +935,10 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn uintptr(flags.ToLinuxFDFlags()), nil, nil\ncase linux.F_SETFD:\nflags := args[2].Uint()\n- t.FDTable().SetFlags(fd, kernel.FDFlags{\n+ err := t.FDTable().SetFlags(fd, kernel.FDFlags{\nCloseOnExec: flags&linux.FD_CLOEXEC != 0,\n})\n- return 0, nil, nil\n+ return 0, nil, err\ncase linux.F_GETFL:\nreturn uintptr(file.Flags().ToLinux()), nil, nil\ncase linux.F_SETFL:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "diff": "@@ -134,10 +134,10 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\nreturn uintptr(flags.ToLinuxFDFlags()), nil, nil\ncase linux.F_SETFD:\nflags := args[2].Uint()\n- t.FDTable().SetFlags(fd, kernel.FDFlags{\n+ err := t.FDTable().SetFlagsVFS2(fd, kernel.FDFlags{\nCloseOnExec: flags&linux.FD_CLOEXEC != 0,\n})\n- return 0, nil, nil\n+ return 0, nil, err\ncase linux.F_GETFL:\nreturn uintptr(file.StatusFlags()), nil, nil\ncase linux.F_SETFL:\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/fcntl.cc", "new_path": "test/syscalls/linux/fcntl.cc", "diff": "@@ -115,6 +115,15 @@ PosixErrorOr<Cleanup> SubprocessLock(std::string const& path, bool for_write,\nreturn std::move(cleanup);\n}\n+TEST(FcntlTest, SetCloExecBadFD) {\n+ // Open an eventfd file descriptor with FD_CLOEXEC descriptor flag not set.\n+ FileDescriptor f = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));\n+ auto fd = f.get();\n+ f.reset();\n+ ASSERT_THAT(fcntl(fd, F_GETFD), SyscallFailsWithErrno(EBADF));\n+ ASSERT_THAT(fcntl(fd, F_SETFD, FD_CLOEXEC), SyscallFailsWithErrno(EBADF));\n+}\n+\nTEST(FcntlTest, SetCloExec) {\n// Open an eventfd file descriptor with FD_CLOEXEC descriptor flag not set.\nFileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, 0));\n" } ]
Go
Apache License 2.0
google/gvisor
vfs2: implement fcntl(fd, F_SETFL, flags) PiperOrigin-RevId: 316148074
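The new test checks that F_GETFD and F_SETFD on an already-closed descriptor fail with EBADF, which is what the VFS2 path now propagates instead of silently succeeding. A hedged Go equivalent using golang.org/x/sys/unix (the real test is C++):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fds := make([]int, 2)
	if err := unix.Pipe(fds); err != nil {
		panic(err)
	}
	fd := fds[0]
	unix.Close(fd)
	unix.Close(fds[1])

	// Both calls should now report EBADF on the closed descriptor.
	if _, err := unix.FcntlInt(uintptr(fd), unix.F_GETFD, 0); err == unix.EBADF {
		fmt.Println("F_GETFD: EBADF as expected")
	}
	if _, err := unix.FcntlInt(uintptr(fd), unix.F_SETFD, unix.FD_CLOEXEC); err == unix.EBADF {
		fmt.Println("F_SETFD: EBADF as expected")
	}
}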
259,854
12.06.2020 20:47:28
25,200
3b5eaad3c8b49367d1812dbaf4182fc7282b7f00
Allow reading IP_MULTICAST_LOOP and IP_MULTICAST_TTL on TCP sockets. I am not really sure what the point of this is, but someone filed a bug about it, so I assume something relies on it.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/endpoint.go", "new_path": "pkg/tcpip/transport/tcp/endpoint.go", "diff": "@@ -1855,6 +1855,9 @@ func (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) {\nreturn v, nil\n+ case tcpip.MulticastLoopOption:\n+ return true, nil\n+\ndefault:\nreturn false, tcpip.ErrUnknownProtocolOption\n}\n@@ -1922,6 +1925,9 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, *tcpip.Error) {\ne.UnlockUser()\nreturn v, nil\n+ case tcpip.MulticastTTLOption:\n+ return 1, nil\n+\ndefault:\nreturn -1, tcpip.ErrUnknownProtocolOption\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_tcp_generic.cc", "new_path": "test/syscalls/linux/socket_ip_tcp_generic.cc", "diff": "@@ -994,6 +994,30 @@ TEST_P(TCPSocketPairTest, SetTCPWindowClampBelowMinRcvBufConnectedSocket) {\n}\n}\n+TEST_P(TCPSocketPairTest, IpMulticastTtlDefault) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int get = -1;\n+ socklen_t get_len = sizeof(get);\n+ EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_TTL,\n+ &get, &get_len),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(get_len, sizeof(get));\n+ EXPECT_GT(get, 0);\n+}\n+\n+TEST_P(TCPSocketPairTest, IpMulticastLoopDefault) {\n+ auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());\n+\n+ int get = -1;\n+ socklen_t get_len = sizeof(get);\n+ EXPECT_THAT(getsockopt(sockets->first_fd(), IPPROTO_IP, IP_MULTICAST_LOOP,\n+ &get, &get_len),\n+ SyscallSucceedsWithValue(0));\n+ EXPECT_EQ(get_len, sizeof(get));\n+ EXPECT_EQ(get, 1);\n+}\n+\nTEST_P(TCPSocketPairTest, TCPResetDuringClose_NoRandomSave) {\nDisableSave ds; // Too many syscalls.\nconstexpr int kThreadCount = 1000;\n" } ]
Go
Apache License 2.0
google/gvisor
Allow reading IP_MULTICAST_LOOP and IP_MULTICAST_TTL on TCP sockets. I am not really sure what the point of this is, but someone filed a bug about it, so I assume something relies on it. PiperOrigin-RevId: 316225127
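A caller relying on this simply reads the options and expects Linux's defaults (TTL 1, loop enabled) rather than an error. A minimal Go sketch with golang.org/x/sys/unix; per the diff, these getsockopt calls previously failed on netstack TCP endpoints because the options were unhandled.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	ttl, err := unix.GetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_MULTICAST_TTL)
	if err != nil {
		panic(err)
	}
	loop, err := unix.GetsockoptInt(fd, unix.IPPROTO_IP, unix.IP_MULTICAST_LOOP)
	if err != nil {
		panic(err)
	}
	fmt.Printf("IP_MULTICAST_TTL=%d IP_MULTICAST_LOOP=%d\n", ttl, loop)
}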
259,985
15.06.2020 01:08:00
25,200
f23f62c2c2bcfb3196b67e64b4a73f820f150caa
Correctly set the test VFS environment variable. Also fix test bugs uncovered now that they aren't silently skipped on VFS2. Updates
[ { "change_type": "MODIFY", "old_path": "test/runner/runner.go", "new_path": "test/runner/runner.go", "diff": "@@ -352,11 +352,15 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n// Set environment variables that indicate we are running in gVisor with\n// the given platform, network, and filesystem stack.\n- // TODO(gvisor.dev/issue/1487): Update this when the runner supports VFS2.\nplatformVar := \"TEST_ON_GVISOR\"\nnetworkVar := \"GVISOR_NETWORK\"\n+ env := append(os.Environ(), platformVar+\"=\"+*platform, networkVar+\"=\"+*network)\nvfsVar := \"GVISOR_VFS\"\n- env := append(os.Environ(), platformVar+\"=\"+*platform, networkVar+\"=\"+*network, vfsVar+\"=VFS1\")\n+ if *vfs2 {\n+ env = append(env, vfsVar+\"=VFS2\")\n+ } else {\n+ env = append(env, vfsVar+\"=VFS1\")\n+ }\n// Remove env variables that cause the gunit binary to write output\n// files, since they will stomp on eachother, and on the output files\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -68,6 +68,11 @@ syscall_test(\nvfs2 = \"True\",\n)\n+syscall_test(\n+ test = \"//test/syscalls/linux:socket_capability_test\",\n+ vfs2 = \"True\",\n+)\n+\nsyscall_test(\nsize = \"large\",\nshard_count = 50,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/BUILD", "new_path": "test/syscalls/linux/BUILD", "diff": "@@ -363,14 +363,29 @@ cc_binary(\nlinkstatic = 1,\ndeps = [\n\":socket_test_util\",\n- \"//test/util:file_descriptor\",\ngtest,\n+ \"//test/util:file_descriptor\",\n\"//test/util:temp_umask\",\n\"//test/util:test_main\",\n\"//test/util:test_util\",\n],\n)\n+cc_binary(\n+ name = \"socket_capability_test\",\n+ testonly = 1,\n+ srcs = [\"socket_capability.cc\"],\n+ linkstatic = 1,\n+ deps = [\n+ \":socket_test_util\",\n+ \"//test/util:capability_util\",\n+ \"//test/util:file_descriptor\",\n+ gtest,\n+ \"//test/util:test_main\",\n+ \"//test/util:test_util\",\n+ ],\n+)\n+\ncc_binary(\nname = \"brk_test\",\ntestonly = 1,\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket.cc", "new_path": "test/syscalls/linux/socket.cc", "diff": "@@ -86,37 +86,12 @@ TEST(SocketTest, UnixSocketStat) {\nEXPECT_EQ(statbuf.st_mode, S_IFSOCK | sock_perm & ~mask);\n// Timestamps should be equal and non-zero.\n+ // TODO(b/158882152): Sockets currently don't implement timestamps.\n+ if (!IsRunningOnGvisor()) {\nEXPECT_NE(statbuf.st_atime, 0);\nEXPECT_EQ(statbuf.st_atime, statbuf.st_mtime);\nEXPECT_EQ(statbuf.st_atime, statbuf.st_ctime);\n}\n-\n-TEST(SocketTest, UnixConnectNeedsWritePerm) {\n- SKIP_IF(IsRunningWithVFS1());\n-\n- FileDescriptor bound =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));\n-\n- struct sockaddr_un addr =\n- ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(/*abstract=*/false, AF_UNIX));\n- ASSERT_THAT(bind(bound.get(), reinterpret_cast<struct sockaddr*>(&addr),\n- sizeof(addr)),\n- SyscallSucceeds());\n- ASSERT_THAT(listen(bound.get(), 1), SyscallSucceeds());\n-\n- // Connect should fail without write perms.\n- ASSERT_THAT(chmod(addr.sun_path, 0500), SyscallSucceeds());\n- FileDescriptor client =\n- ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));\n- EXPECT_THAT(connect(client.get(), reinterpret_cast<struct sockaddr*>(&addr),\n- sizeof(addr)),\n- SyscallFailsWithErrno(EACCES));\n-\n- // Connect should succeed with write perms.\n- ASSERT_THAT(chmod(addr.sun_path, 0200), SyscallSucceeds());\n- EXPECT_THAT(connect(client.get(), reinterpret_cast<struct sockaddr*>(&addr),\n- 
sizeof(addr)),\n- SyscallSucceeds());\n}\nusing SocketOpenTest = ::testing::TestWithParam<int>;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/syscalls/linux/socket_capability.cc", "diff": "+// Copyright 2018 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Subset of socket tests that need Linux-specific headers (compared to POSIX\n+// headers).\n+\n+#include \"gtest/gtest.h\"\n+#include \"test/syscalls/linux/socket_test_util.h\"\n+#include \"test/util/capability_util.h\"\n+#include \"test/util/file_descriptor.h\"\n+#include \"test/util/test_util.h\"\n+\n+namespace gvisor {\n+namespace testing {\n+\n+TEST(SocketTest, UnixConnectNeedsWritePerm) {\n+ SKIP_IF(IsRunningWithVFS1());\n+\n+ FileDescriptor bound =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));\n+\n+ struct sockaddr_un addr =\n+ ASSERT_NO_ERRNO_AND_VALUE(UniqueUnixAddr(/*abstract=*/false, AF_UNIX));\n+ ASSERT_THAT(bind(bound.get(), reinterpret_cast<struct sockaddr*>(&addr),\n+ sizeof(addr)),\n+ SyscallSucceeds());\n+ ASSERT_THAT(listen(bound.get(), 1), SyscallSucceeds());\n+\n+ // Drop capabilites that allow us to override permision checks. Otherwise if\n+ // the test is run as root, the connect below will bypass permission checks\n+ // and succeed unexpectedly.\n+ ASSERT_NO_ERRNO(SetCapability(CAP_DAC_OVERRIDE, false));\n+\n+ // Connect should fail without write perms.\n+ ASSERT_THAT(chmod(addr.sun_path, 0500), SyscallSucceeds());\n+ FileDescriptor client =\n+ ASSERT_NO_ERRNO_AND_VALUE(Socket(AF_UNIX, SOCK_STREAM, PF_UNIX));\n+ ASSERT_THAT(connect(client.get(), reinterpret_cast<struct sockaddr*>(&addr),\n+ sizeof(addr)),\n+ SyscallFailsWithErrno(EACCES));\n+\n+ // Connect should succeed with write perms.\n+ ASSERT_THAT(chmod(addr.sun_path, 0200), SyscallSucceeds());\n+ EXPECT_THAT(connect(client.get(), reinterpret_cast<struct sockaddr*>(&addr),\n+ sizeof(addr)),\n+ SyscallSucceeds());\n+}\n+\n+} // namespace testing\n+} // namespace gvisor\n" } ]
Go
Apache License 2.0
google/gvisor
Correctly set the test VFS environment variable. Also fix test bugs uncovered now that they aren't silently skipped on VFS2. Updates #1487. PiperOrigin-RevId: 316415807
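With the runner fix, tests can trust GVISOR_VFS to be either VFS1 or VFS2. A hypothetical Go snippet of how a test binary might branch on it; the actual syscall tests do this in C++ via IsRunningWithVFS1().

package main

import (
	"fmt"
	"os"
)

func main() {
	vfs := os.Getenv("GVISOR_VFS") // Set by the test runner to "VFS1" or "VFS2".
	if vfs == "VFS1" {
		fmt.Println("skipping: this check needs VFS2-only behavior")
		return
	}
	fmt.Println("running with filesystem stack:", vfs)
}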
259,881
15.06.2020 13:34:33
25,200
885605c5e412f9ced42766094368c86be1002cff
Remove blacklist from //test/runtimes. Updates
[ { "change_type": "MODIFY", "old_path": "test/runtimes/BUILD", "new_path": "test/runtimes/BUILD", "diff": "@@ -4,30 +4,30 @@ package(licenses = [\"notice\"])\nruntime_test(\nname = \"go1.12\",\n- blacklist_file = \"blacklist_go1.12.csv\",\n+ exclude_file = \"exclude_go1.12.csv\",\nlang = \"go\",\n)\nruntime_test(\nname = \"java11\",\n- blacklist_file = \"blacklist_java11.csv\",\n+ exclude_file = \"exclude_java11.csv\",\nlang = \"java\",\n)\nruntime_test(\nname = \"nodejs12.4.0\",\n- blacklist_file = \"blacklist_nodejs12.4.0.csv\",\n+ exclude_file = \"exclude_nodejs12.4.0.csv\",\nlang = \"nodejs\",\n)\nruntime_test(\nname = \"php7.3.6\",\n- blacklist_file = \"blacklist_php7.3.6.csv\",\n+ exclude_file = \"exclude_php7.3.6.csv\",\nlang = \"php\",\n)\nruntime_test(\nname = \"python3.7.3\",\n- blacklist_file = \"blacklist_python3.7.3.csv\",\n+ exclude_file = \"exclude_python3.7.3.csv\",\nlang = \"python\",\n)\n" }, { "change_type": "MODIFY", "old_path": "test/runtimes/defs.bzl", "new_path": "test/runtimes/defs.bzl", "diff": "@@ -10,10 +10,10 @@ def _runtime_test_impl(ctx):\n\"--image\",\nctx.attr.image,\n]\n- if ctx.attr.blacklist_file:\n+ if ctx.attr.exclude_file:\nargs += [\n- \"--blacklist_file\",\n- ctx.files.blacklist_file[0].short_path,\n+ \"--exclude_file\",\n+ ctx.files.exclude_file[0].short_path,\n]\n# Build a runner.\n@@ -28,7 +28,7 @@ def _runtime_test_impl(ctx):\nreturn [DefaultInfo(\nexecutable = runner,\nrunfiles = ctx.runfiles(\n- files = ctx.files._runner + ctx.files.blacklist_file + ctx.files._proctor,\n+ files = ctx.files._runner + ctx.files.exclude_file + ctx.files._proctor,\ncollect_default = True,\ncollect_data = True,\n),\n@@ -43,7 +43,7 @@ _runtime_test = rule(\n\"lang\": attr.string(\nmandatory = True,\n),\n- \"blacklist_file\": attr.label(\n+ \"exclude_file\": attr.label(\nmandatory = False,\nallow_single_file = True,\n),\n@@ -68,12 +68,12 @@ def runtime_test(name, **kwargs):\n**kwargs\n)\n-def blacklist_test(name, blacklist_file):\n- \"\"\"Test that a blacklist parses correctly.\"\"\"\n+def exclude_test(name, exclude_file):\n+ \"\"\"Test that a exclude file parses correctly.\"\"\"\ngo_test(\n- name = name + \"_blacklist_test\",\n+ name = name + \"_exclude_test\",\nlibrary = \":runner\",\n- srcs = [\"blacklist_test.go\"],\n- args = [\"--blacklist_file\", \"test/runtimes/\" + blacklist_file],\n- data = [blacklist_file],\n+ srcs = [\"exclude_test.go\"],\n+ args = [\"--exclude_file\", \"test/runtimes/\" + exclude_file],\n+ data = [exclude_file],\n)\n" }, { "change_type": "RENAME", "old_path": "test/runtimes/blacklist_go1.12.csv", "new_path": "test/runtimes/exclude_go1.12.csv", "diff": "" }, { "change_type": "RENAME", "old_path": "test/runtimes/blacklist_java11.csv", "new_path": "test/runtimes/exclude_java11.csv", "diff": "" }, { "change_type": "RENAME", "old_path": "test/runtimes/blacklist_nodejs12.4.0.csv", "new_path": "test/runtimes/exclude_nodejs12.4.0.csv", "diff": "" }, { "change_type": "RENAME", "old_path": "test/runtimes/blacklist_php7.3.6.csv", "new_path": "test/runtimes/exclude_php7.3.6.csv", "diff": "" }, { "change_type": "RENAME", "old_path": "test/runtimes/blacklist_python3.7.3.csv", "new_path": "test/runtimes/exclude_python3.7.3.csv", "diff": "" }, { "change_type": "MODIFY", "old_path": "test/runtimes/runner/BUILD", "new_path": "test/runtimes/runner/BUILD", "diff": "@@ -14,8 +14,8 @@ go_binary(\n)\ngo_test(\n- name = \"blacklist_test\",\n+ name = \"exclude_test\",\nsize = \"small\",\n- srcs = [\"blacklist_test.go\"],\n+ srcs = [\"exclude_test.go\"],\nlibrary 
= \":runner\",\n)\n" }, { "change_type": "RENAME", "old_path": "test/runtimes/runner/blacklist_test.go", "new_path": "test/runtimes/runner/exclude_test.go", "diff": "@@ -25,13 +25,13 @@ func TestMain(m *testing.M) {\nos.Exit(m.Run())\n}\n-// Test that the blacklist parses without error.\n+// Test that the exclude file parses without error.\nfunc TestBlacklists(t *testing.T) {\n- bl, err := getBlacklist()\n+ ex, err := getExcludes()\nif err != nil {\n- t.Fatalf(\"error parsing blacklist: %v\", err)\n+ t.Fatalf(\"error parsing exclude file: %v\", err)\n}\n- if *blacklistFile != \"\" && len(bl) == 0 {\n- t.Errorf(\"got empty blacklist for file %q\", *blacklistFile)\n+ if *excludeFile != \"\" && len(ex) == 0 {\n+ t.Errorf(\"got empty excludes for file %q\", *excludeFile)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/runtimes/runner/main.go", "new_path": "test/runtimes/runner/main.go", "diff": "@@ -33,7 +33,7 @@ import (\nvar (\nlang = flag.String(\"lang\", \"\", \"language runtime to test\")\nimage = flag.String(\"image\", \"\", \"docker image with runtime tests\")\n- blacklistFile = flag.String(\"blacklist_file\", \"\", \"file containing blacklist of tests to exclude, in CSV format with fields: test name, bug id, comment\")\n+ excludeFile = flag.String(\"exclude_file\", \"\", \"file containing list of tests to exclude, in CSV format with fields: test name, bug id, comment\")\n)\n// Wait time for each test to run.\n@@ -52,10 +52,10 @@ func main() {\n// defered functions before exiting. It returns an exit code that should be\n// passed to os.Exit.\nfunc runTests() int {\n- // Get tests to blacklist.\n- blacklist, err := getBlacklist()\n+ // Get tests to exclude..\n+ excludes, err := getExcludes()\nif err != nil {\n- fmt.Fprintf(os.Stderr, \"Error getting blacklist: %s\\n\", err.Error())\n+ fmt.Fprintf(os.Stderr, \"Error getting exclude list: %s\\n\", err.Error())\nreturn 1\n}\n@@ -66,7 +66,7 @@ func runTests() int {\n// Get a slice of tests to run. This will also start a single Docker\n// container that will be used to run each test. 
The final test will\n// stop the Docker container.\n- tests, err := getTests(d, blacklist)\n+ tests, err := getTests(d, excludes)\nif err != nil {\nfmt.Fprintf(os.Stderr, \"%s\\n\", err.Error())\nreturn 1\n@@ -77,7 +77,7 @@ func runTests() int {\n}\n// getTests executes all tests as table tests.\n-func getTests(d *dockerutil.Docker, blacklist map[string]struct{}) ([]testing.InternalTest, error) {\n+func getTests(d *dockerutil.Docker, excludes map[string]struct{}) ([]testing.InternalTest, error) {\n// Start the container.\nd.CopyFiles(\"/proctor\", \"test/runtimes/proctor/proctor\")\nif err := d.Spawn(dockerutil.RunOpts{\n@@ -108,9 +108,9 @@ func getTests(d *dockerutil.Docker, blacklist map[string]struct{}) ([]testing.In\nitests = append(itests, testing.InternalTest{\nName: tc,\nF: func(t *testing.T) {\n- // Is the test blacklisted?\n- if _, ok := blacklist[tc]; ok {\n- t.Skipf(\"SKIP: blacklisted test %q\", tc)\n+ // Is the test excluded?\n+ if _, ok := excludes[tc]; ok {\n+ t.Skipf(\"SKIP: excluded test %q\", tc)\n}\nvar (\n@@ -143,14 +143,14 @@ func getTests(d *dockerutil.Docker, blacklist map[string]struct{}) ([]testing.In\nreturn itests, nil\n}\n-// getBlacklist reads the blacklist file and returns a set of test names to\n+// getBlacklist reads the exclude file and returns a set of test names to\n// exclude.\n-func getBlacklist() (map[string]struct{}, error) {\n- blacklist := make(map[string]struct{})\n- if *blacklistFile == \"\" {\n- return blacklist, nil\n+func getExcludes() (map[string]struct{}, error) {\n+ excludes := make(map[string]struct{})\n+ if *excludeFile == \"\" {\n+ return excludes, nil\n}\n- f, err := os.Open(*blacklistFile)\n+ f, err := os.Open(*excludeFile)\nif err != nil {\nreturn nil, err\n}\n@@ -171,9 +171,9 @@ func getBlacklist() (map[string]struct{}, error) {\nif err != nil {\nreturn nil, err\n}\n- blacklist[record[0]] = struct{}{}\n+ excludes[record[0]] = struct{}{}\n}\n- return blacklist, nil\n+ return excludes, nil\n}\n// testDeps implements testing.testDeps (an unexported interface), and is\n" } ]
Go
Apache License 2.0
google/gvisor
Remove blacklist from //test/runtimes Updates #2972 PiperOrigin-RevId: 316534165
259,885
16.06.2020 00:14:07
25,200
3b0b1f104d963a1d11973c444934e6744ab7e79b
Miscellaneous VFS2 fixes.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -835,6 +835,14 @@ func (d *dentry) statTo(stat *linux.Statx) {\nstat.Mask = linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_NLINK | linux.STATX_UID | linux.STATX_GID | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME | linux.STATX_INO | linux.STATX_SIZE | linux.STATX_BLOCKS | linux.STATX_BTIME\nstat.Blksize = atomic.LoadUint32(&d.blockSize)\nstat.Nlink = atomic.LoadUint32(&d.nlink)\n+ if stat.Nlink == 0 {\n+ // The remote filesystem doesn't support link count; just make\n+ // something up. This is consistent with Linux, where\n+ // fs/inode.c:inode_init_always() initializes link count to 1, and\n+ // fs/9p/vfs_inode_dotl.c:v9fs_stat2inode_dotl() doesn't touch it if\n+ // it's not provided by the remote filesystem.\n+ stat.Nlink = 1\n+ }\nstat.UID = atomic.LoadUint32(&d.uid)\nstat.GID = atomic.LoadUint32(&d.gid)\nstat.Mode = uint16(atomic.LoadUint32(&d.mode))\n@@ -1346,23 +1354,21 @@ func (d *dentry) ensureSharedHandle(ctx context.Context, read, write, trunc bool\n}\n// incLinks increments link count.\n-//\n-// Preconditions: d.nlink != 0 && d.nlink < math.MaxUint32.\nfunc (d *dentry) incLinks() {\n- v := atomic.AddUint32(&d.nlink, 1)\n- if v < 2 {\n- panic(fmt.Sprintf(\"dentry.nlink is invalid (was 0 or overflowed): %d\", v))\n+ if atomic.LoadUint32(&d.nlink) == 0 {\n+ // The remote filesystem doesn't support link count.\n+ return\n}\n+ atomic.AddUint32(&d.nlink, 1)\n}\n// decLinks decrements link count.\n-//\n-// Preconditions: d.nlink > 1.\nfunc (d *dentry) decLinks() {\n- v := atomic.AddUint32(&d.nlink, ^uint32(0))\n- if v == 0 {\n- panic(fmt.Sprintf(\"dentry.nlink must be greater than 0: %d\", v))\n+ if atomic.LoadUint32(&d.nlink) == 0 {\n+ // The remote filesystem doesn't support link count.\n+ return\n}\n+ atomic.AddUint32(&d.nlink, ^uint32(0))\n}\n// fileDescription is embedded by gofer implementations of\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/kernel/kernel.go", "new_path": "pkg/sentry/kernel/kernel.go", "diff": "@@ -892,7 +892,7 @@ func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, ThreadID,\nif mntnsVFS2 == nil {\n// MountNamespaceVFS2 adds a reference to the namespace, which is\n// transferred to the new process.\n- mntnsVFS2 = k.GlobalInit().Leader().MountNamespaceVFS2()\n+ mntnsVFS2 = k.globalInit.Leader().MountNamespaceVFS2()\n}\n// Get the root directory from the MountNamespace.\nroot := args.MountNamespaceVFS2.Root()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/ioctl.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/ioctl.go", "diff": "package vfs2\nimport (\n+ \"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/sentry/arch\"\n\"gvisor.dev/gvisor/pkg/sentry/kernel\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n@@ -30,6 +31,34 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n}\ndefer file.DecRef()\n+ // Handle ioctls that apply to all FDs.\n+ switch args[1].Int() {\n+ case linux.FIONCLEX:\n+ t.FDTable().SetFlagsVFS2(fd, kernel.FDFlags{\n+ CloseOnExec: false,\n+ })\n+ return 0, nil, nil\n+\n+ case linux.FIOCLEX:\n+ t.FDTable().SetFlagsVFS2(fd, kernel.FDFlags{\n+ CloseOnExec: true,\n+ })\n+ return 0, nil, nil\n+\n+ case linux.FIONBIO:\n+ var set int32\n+ if _, err := t.CopyIn(args[2].Pointer(), &set); err != nil {\n+ return 0, nil, err\n+ }\n+ flags := file.StatusFlags()\n+ if set != 0 {\n+ flags |= linux.O_NONBLOCK\n+ } else 
{\n+ flags &^= linux.O_NONBLOCK\n+ }\n+ return 0, nil, file.SetStatusFlags(t, t.Credentials(), flags)\n+ }\n+\nret, err := file.Ioctl(t, t.MemoryManager(), args)\nreturn ret, nil, err\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Miscellaneous VFS2 fixes. PiperOrigin-RevId: 316627764
259,992
16.06.2020 10:50:29
25,200
bae1475603b03a38726da743430c761fb36ee338
Print spec as json when --debug is enabled The previous format skipped many important structs that are pointers, especially for cgroups. Change to print as json, removing parts of the spec that are not relevant. Also removed debug message from gofer that can be very noisy when directories are large.
[ { "change_type": "MODIFY", "old_path": "runsc/fsgofer/fsgofer.go", "new_path": "runsc/fsgofer/fsgofer.go", "diff": "@@ -175,8 +175,6 @@ func (a *attachPoint) makeQID(stat syscall.Stat_t) p9.QID {\nlog.Warningf(\"first 8 bytes of host inode id %x will be truncated to construct virtual inode id\", stat.Ino)\n}\nino := uint64(dev)<<56 | maskedIno\n- log.Debugf(\"host inode %x on device %x mapped to virtual inode %x\", stat.Ino, stat.Dev, ino)\n-\nreturn p9.QID{\nType: p9.FileMode(stat.Mode).QIDType(),\nPath: ino,\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/BUILD", "new_path": "runsc/specutils/BUILD", "diff": "@@ -17,6 +17,7 @@ go_library(\n\"//pkg/log\",\n\"//pkg/sentry/kernel/auth\",\n\"@com_github_cenkalti_backoff//:go_default_library\",\n+ \"@com_github_mohae_deepcopy//:go_default_library\",\n\"@com_github_opencontainers_runtime-spec//specs-go:go_default_library\",\n\"@com_github_syndtr_gocapability//capability:go_default_library\",\n\"@org_golang_x_sys//unix:go_default_library\",\n" }, { "change_type": "MODIFY", "old_path": "runsc/specutils/specutils.go", "new_path": "runsc/specutils/specutils.go", "diff": "@@ -29,6 +29,7 @@ import (\n\"time\"\n\"github.com/cenkalti/backoff\"\n+ \"github.com/mohae/deepcopy\"\nspecs \"github.com/opencontainers/runtime-spec/specs-go\"\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/bits\"\n@@ -44,20 +45,31 @@ var ExePath = \"/proc/self/exe\"\nvar Version = specs.Version\n// LogSpec logs the spec in a human-friendly way.\n-func LogSpec(spec *specs.Spec) {\n- log.Debugf(\"Spec: %+v\", spec)\n- log.Debugf(\"Spec.Hooks: %+v\", spec.Hooks)\n- log.Debugf(\"Spec.Linux: %+v\", spec.Linux)\n- if spec.Linux != nil && spec.Linux.Resources != nil {\n- res := spec.Linux.Resources\n- log.Debugf(\"Spec.Linux.Resources.Memory: %+v\", res.Memory)\n- log.Debugf(\"Spec.Linux.Resources.CPU: %+v\", res.CPU)\n- log.Debugf(\"Spec.Linux.Resources.BlockIO: %+v\", res.BlockIO)\n- log.Debugf(\"Spec.Linux.Resources.Network: %+v\", res.Network)\n- }\n- log.Debugf(\"Spec.Process: %+v\", spec.Process)\n- log.Debugf(\"Spec.Root: %+v\", spec.Root)\n- log.Debugf(\"Spec.Mounts: %+v\", spec.Mounts)\n+func LogSpec(orig *specs.Spec) {\n+ if !log.IsLogging(log.Debug) {\n+ return\n+ }\n+\n+ // Strip down parts of the spec that are not interesting.\n+ spec := deepcopy.Copy(orig).(*specs.Spec)\n+ if spec.Process != nil {\n+ spec.Process.Capabilities = nil\n+ }\n+ if spec.Linux != nil {\n+ spec.Linux.Seccomp = nil\n+ spec.Linux.MaskedPaths = nil\n+ spec.Linux.ReadonlyPaths = nil\n+ if spec.Linux.Resources != nil {\n+ spec.Linux.Resources.Devices = nil\n+ }\n+ }\n+\n+ out, err := json.MarshalIndent(spec, \"\", \" \")\n+ if err != nil {\n+ log.Debugf(\"Failed to marshal spec: %v\", err)\n+ return\n+ }\n+ log.Debugf(\"Spec:\\n%s\", out)\n}\n// ValidateSpec validates that the spec is compatible with runsc.\n" } ]
Go
Apache License 2.0
google/gvisor
Print spec as json when --debug is enabled The previous format skipped many important structs that are pointers, especially for cgroups. Change to print as json, removing parts of the spec that are not relevant. Also removed debug message from gofer that can be very noisy when directories are large. PiperOrigin-RevId: 316713267
259,885
16.06.2020 16:14:26
25,200
e61acfb5eaec4450587116573f708284a0fe7849
Correctly handle multiple resizings in pgalloc.findAvailableRange().
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/pgalloc/pgalloc.go", "new_path": "pkg/sentry/pgalloc/pgalloc.go", "diff": "@@ -441,53 +441,61 @@ func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (platform.Fi\n// Precondition: alignment must be a power of 2.\nfunc findAvailableRange(usage *usageSet, fileSize int64, length, alignment uint64) (platform.FileRange, bool) {\nalignmentMask := alignment - 1\n- for gap := usage.UpperBoundGap(uint64(fileSize)); gap.Ok(); gap = gap.PrevLargeEnoughGap(length) {\n- // Start searching only at end of file.\n+\n+ // Search for space in existing gaps, starting at the current end of the\n+ // file and working backward.\n+ lastGap := usage.LastGap()\n+ gap := lastGap\n+ for {\nend := gap.End()\nif end > uint64(fileSize) {\nend = uint64(fileSize)\n}\n- // Start at the top and align downwards.\n- start := end - length\n- if start > end {\n- break // Underflow.\n+ // Try to allocate from the end of this gap, with the start of the\n+ // allocated range aligned down to alignment.\n+ unalignedStart := end - length\n+ if unalignedStart > end {\n+ // Negative overflow: this and all preceding gaps are too small to\n+ // accommodate length.\n+ break\n}\n- start &^= alignmentMask\n-\n- // Is the gap still sufficient?\n- if start < gap.Start() {\n- continue\n+ if start := unalignedStart &^ alignmentMask; start >= gap.Start() {\n+ return platform.FileRange{start, start + length}, true\n}\n- // Allocate in the given gap.\n- return platform.FileRange{start, start + length}, true\n+ gap = gap.PrevLargeEnoughGap(length)\n+ if !gap.Ok() {\n+ break\n+ }\n}\n// Check that it's possible to fit this allocation at the end of a file of any size.\n- min := usage.LastGap().Start()\n+ min := lastGap.Start()\nmin = (min + alignmentMask) &^ alignmentMask\nif min+length < min {\n- // Overflow.\n+ // Overflow: allocation would exceed the range of uint64.\nreturn platform.FileRange{}, false\n}\n// Determine the minimum file size required to fit this allocation at its end.\nfor {\n- if fileSize >= 2*fileSize {\n- // Is this because it's initially empty?\n- if fileSize == 0 {\n- fileSize += chunkSize\n- } else {\n- // fileSize overflow.\n+ newFileSize := 2 * fileSize\n+ if newFileSize <= fileSize {\n+ if fileSize != 0 {\n+ // Overflow: allocation would exceed the range of int64.\nreturn platform.FileRange{}, false\n}\n- } else {\n- // Double the current fileSize.\n- fileSize *= 2\n+ newFileSize = chunkSize\n+ }\n+ fileSize = newFileSize\n+\n+ unalignedStart := uint64(fileSize) - length\n+ if unalignedStart > uint64(fileSize) {\n+ // Negative overflow: fileSize is still inadequate.\n+ continue\n}\n- start := (uint64(fileSize) - length) &^ alignmentMask\n- if start >= min {\n+ if start := unalignedStart &^ alignmentMask; start >= min {\nreturn platform.FileRange{start, start + length}, true\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/pgalloc/pgalloc_test.go", "new_path": "pkg/sentry/pgalloc/pgalloc_test.go", "diff": "@@ -142,6 +142,14 @@ func TestFindUnallocatedRange(t *testing.T) {\nalignment: hugepage,\nstart: hugepage,\n},\n+ {\n+ desc: \"Allocation doubles file size more than once if necessary\",\n+ usage: &usageSegmentDataSlices{},\n+ fileSize: page,\n+ length: 4 * page,\n+ alignment: page,\n+ start: 0,\n+ },\n{\ndesc: \"Allocations are compact if possible\",\nusage: &usageSegmentDataSlices{\n" } ]
Go
Apache License 2.0
google/gvisor
Correctly handle multiple resizings in pgalloc.findAvailableRange(). PiperOrigin-RevId: 316778032
260,023
17.06.2020 10:45:03
25,200
50afec55c75dfd5bb88d100f77e17048dfbb3322
TCP stat fixes Ensure that CurrentConnected stat is updated on any errors and cleanups during connected state processing. Fixes
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/connect.go", "new_path": "pkg/tcpip/transport/tcp/connect.go", "diff": "@@ -1516,6 +1516,7 @@ func (e *endpoint) protocolMainLoop(handshake bool, wakerInitDone chan<- struct{\n// Main loop. Handle segments until both send and receive ends of the\n// connection have completed.\ncleanupOnError := func(err *tcpip.Error) {\n+ e.stack.Stats().TCP.CurrentConnected.Decrement()\ne.workerCleanup = true\nif err != nil {\ne.resetConnectionLocked(err)\n@@ -1568,11 +1569,14 @@ loop:\nreuseTW = e.doTimeWait()\n}\n- // Mark endpoint as closed.\n- if e.EndpointState() != StateError {\n- e.transitionToStateCloseLocked()\n+ // Handle any StateError transition from StateTimeWait.\n+ if e.EndpointState() == StateError {\n+ cleanupOnError(nil)\n+ return nil\n}\n+ e.transitionToStateCloseLocked()\n+\n// Lock released below.\nepilogue()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/tcp_test.go", "new_path": "pkg/tcpip/transport/tcp/tcp_test.go", "diff": "@@ -2967,6 +2967,9 @@ loop:\nif got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {\nt.Errorf(\"got stats.TCP.CurrentEstablished.Value() = %d, want = 0\", got)\n}\n+ if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {\n+ t.Errorf(\"got stats.TCP.CurrentConnected.Value() = %d, want = 0\", got)\n+ }\n}\nfunc TestSendOnResetConnection(t *testing.T) {\n@@ -3050,6 +3053,9 @@ func TestMaxRetransmitsTimeout(t *testing.T) {\nif got := c.Stack().Stats().TCP.EstablishedTimedout.Value(); got != 1 {\nt.Errorf(\"got c.Stack().Stats().TCP.EstablishedTimedout.Value() = %d, want = 1\", got)\n}\n+ if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {\n+ t.Errorf(\"got stats.TCP.CurrentConnected.Value() = %d, want = 0\", got)\n+ }\n}\n// TestMaxRTO tests if the retransmit interval caps to MaxRTO.\n@@ -4754,6 +4760,9 @@ func TestKeepalive(t *testing.T) {\nif got := c.Stack().Stats().TCP.CurrentEstablished.Value(); got != 0 {\nt.Errorf(\"got stats.TCP.CurrentEstablished.Value() = %d, want = 0\", got)\n}\n+ if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {\n+ t.Errorf(\"got stats.TCP.CurrentConnected.Value() = %d, want = 0\", got)\n+ }\n}\nfunc executeHandshake(t *testing.T, c *context.Context, srcPort uint16, synCookieInUse bool) (irs, iss seqnum.Value) {\n@@ -6771,6 +6780,9 @@ func TestTCPUserTimeout(t *testing.T) {\nif got, want := c.Stack().Stats().TCP.EstablishedTimedout.Value(), origEstablishedTimedout+1; got != want {\nt.Errorf(\"got c.Stack().Stats().TCP.EstablishedTimedout = %d, want = %d\", got, want)\n}\n+ if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {\n+ t.Errorf(\"got stats.TCP.CurrentConnected.Value() = %d, want = 0\", got)\n+ }\n}\nfunc TestKeepaliveWithUserTimeout(t *testing.T) {\n@@ -6842,6 +6854,9 @@ func TestKeepaliveWithUserTimeout(t *testing.T) {\nif got, want := c.Stack().Stats().TCP.EstablishedTimedout.Value(), origEstablishedTimedout+1; got != want {\nt.Errorf(\"got c.Stack().Stats().TCP.EstablishedTimedout = %d, want = %d\", got, want)\n}\n+ if got := c.Stack().Stats().TCP.CurrentConnected.Value(); got != 0 {\n+ t.Errorf(\"got stats.TCP.CurrentConnected.Value() = %d, want = 0\", got)\n+ }\n}\nfunc TestIncreaseWindowOnReceive(t *testing.T) {\n" } ]
Go
Apache License 2.0
google/gvisor
TCP stat fixes Ensure that CurrentConnected stat is updated on any errors and cleanups during connected state processing. Fixes #2968 PiperOrigin-RevId: 316919426
259,860
17.06.2020 11:32:23
25,200
e5d97cbcc1e64185b8fab1cf563c8754edd2e52e
Refactor host.canMap. Simplify the canMap check. We do not have plans to allow mmap for anything beyond regular files, so we can just inline canMap() as a simple file mode check. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -91,7 +91,9 @@ func NewFD(ctx context.Context, mnt *vfs.Mount, hostFD int, opts *NewFDOptions)\nisTTY: opts.IsTTY,\nwouldBlock: wouldBlock(uint32(fileType)),\nseekable: seekable,\n- canMap: canMap(uint32(fileType)),\n+ // NOTE(b/38213152): Technically, some obscure char devices can be memory\n+ // mapped, but we only allow regular files.\n+ canMap: fileType == linux.S_IFREG,\n}\ni.pf.inode = i\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/util.go", "new_path": "pkg/sentry/fsimpl/host/util.go", "diff": "@@ -49,16 +49,6 @@ func wouldBlock(fileType uint32) bool {\nreturn fileType == syscall.S_IFIFO || fileType == syscall.S_IFCHR || fileType == syscall.S_IFSOCK\n}\n-// canMap returns true if a file with fileType is allowed to be memory mapped.\n-// This is ported over from VFS1, but it's probably not the best way for us\n-// to check if a file can be memory mapped.\n-func canMap(fileType uint32) bool {\n- // TODO(gvisor.dev/issue/1672): Also allow \"special files\" to be mapped (see fs/host:canMap()).\n- //\n- // TODO(b/38213152): Some obscure character devices can be mapped.\n- return fileType == syscall.S_IFREG\n-}\n-\n// isBlockError checks if an error is EAGAIN or EWOULDBLOCK.\n// If so, they can be transformed into syserror.ErrWouldBlock.\nfunc isBlockError(err error) bool {\n" } ]
Go
Apache License 2.0
google/gvisor
Refactor host.canMap. Simplify the canMap check. We do not have plans to allow mmap for anything beyond regular files, so we can just inline canMap() as a simple file mode check. Updates #1672. PiperOrigin-RevId: 316929654
259,881
17.06.2020 12:32:59
25,200
6d806ee7198422973a2e4efa9b539de7792b933f
Remove various uses of 'blacklist' Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/host/tty.go", "new_path": "pkg/sentry/fs/host/tty.go", "diff": "@@ -308,9 +308,9 @@ func (t *TTYFileOperations) checkChange(ctx context.Context, sig linux.Signal) e\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\n// No task? Linux does not have an analog for this case, but\n- // tty_check_change is more of a blacklist of cases than a\n- // whitelist, and is surprisingly permissive. Allowing the\n- // change seems most appropriate.\n+ // tty_check_change only blocks specific cases and is\n+ // surprisingly permissive. Allowing the change seems\n+ // appropriate.\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/tty.go", "new_path": "pkg/sentry/fsimpl/host/tty.go", "diff": "@@ -326,9 +326,9 @@ func (t *TTYFileDescription) checkChange(ctx context.Context, sig linux.Signal)\ntask := kernel.TaskFromContext(ctx)\nif task == nil {\n// No task? Linux does not have an analog for this case, but\n- // tty_check_change is more of a blacklist of cases than a\n- // whitelist, and is surprisingly permissive. Allowing the\n- // change seems most appropriate.\n+ // tty_check_change only blocks specific cases and is\n+ // surprisingly permissive. Allowing the change seems\n+ // appropriate.\nreturn nil\n}\n" }, { "change_type": "MODIFY", "old_path": "test/runner/runner.go", "new_path": "test/runner/runner.go", "diff": "@@ -391,12 +391,12 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {\n}\n}\n-// filterEnv returns an environment with the blacklisted variables removed.\n-func filterEnv(env, blacklist []string) []string {\n+// filterEnv returns an environment with the excluded variables removed.\n+func filterEnv(env, exclude []string) []string {\nvar out []string\nfor _, kv := range env {\nok := true\n- for _, k := range blacklist {\n+ for _, k := range exclude {\nif strings.HasPrefix(kv, k+\"=\") {\nok = false\nbreak\n" }, { "change_type": "MODIFY", "old_path": "tools/nogo/matchers.go", "new_path": "tools/nogo/matchers.go", "diff": "@@ -89,7 +89,7 @@ func (r resultExcluded) ShouldReport(d analysis.Diagnostic, _ *token.FileSet) bo\nreturn false\n}\n}\n- return true // Not blacklisted.\n+ return true // Not excluded.\n}\n// andMatcher is a composite matcher.\n" } ]
Go
Apache License 2.0
google/gvisor
Remove various uses of 'blacklist' Updates #2972 PiperOrigin-RevId: 316942245
259,962
17.06.2020 14:07:27
25,200
505e8f4e3d72400194a69c220ce2f4288e957c4c
Fix TtlDefault test on linux. Different flavors of linux seem to use different defaults; we accept 64 or 127 as the TtlDefault in the test.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/socket_ip_unbound.cc", "new_path": "test/syscalls/linux/socket_ip_unbound.cc", "diff": "@@ -40,7 +40,7 @@ TEST_P(IPUnboundSocketTest, TtlDefault) {\nsocklen_t get_sz = sizeof(get);\nEXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get, &get_sz),\nSyscallSucceedsWithValue(0));\n- EXPECT_EQ(get, 64);\n+ EXPECT_TRUE(get == 64 || get == 127);\nEXPECT_EQ(get_sz, sizeof(get));\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Fix TtlDefault test on linux. Different flavors of linux seem to use different defaults we accept 64 or 127 as the TtlDefault in the test. PiperOrigin-RevId: 316961150
259,854
17.06.2020 15:14:17
25,200
02072fd243a1e88b635dd6a24dd512608fbfce5f
Add test for stat("/proc/net/snmp").
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc_net.cc", "new_path": "test/syscalls/linux/proc_net.cc", "diff": "@@ -441,6 +441,11 @@ TEST(ProcNetSnmp, CheckNetStat) {\nEXPECT_EQ(value_count, 4);\n}\n+TEST(ProcNetSnmp, Stat) {\n+ struct stat st = {};\n+ ASSERT_THAT(stat(\"/proc/net/snmp\", &st), SyscallSucceeds());\n+}\n+\nTEST(ProcNetSnmp, CheckSnmp) {\n// TODO(b/155123175): SNMP and netstat don't work on gVisor.\nSKIP_IF(IsRunningOnGvisor());\n" } ]
Go
Apache License 2.0
google/gvisor
Add test for stat("/proc/net/snmp"). PiperOrigin-RevId: 316974863
260,004
17.06.2020 17:21:19
25,200
57286eb642b9becc566f8e9c1dcbe24619f7772b
Increase timeouts for NDP tests ... to help reduce flakes. When waiting for an event to occur, use a timeout of 10s. When waiting for an event to not occur, use a timeout of 1s. Test: Ran test locally w/ run count of 1000 with and without gotsan.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/ndp_test.go", "new_path": "pkg/tcpip/stack/ndp_test.go", "diff": "@@ -43,8 +43,17 @@ const (\nlinkAddr2 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x07\")\nlinkAddr3 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x08\")\nlinkAddr4 = tcpip.LinkAddress(\"\\x02\\x02\\x03\\x04\\x05\\x09\")\n- defaultTimeout = 100 * time.Millisecond\n- defaultAsyncEventTimeout = time.Second\n+\n+ // Extra time to use when waiting for an async event to occur.\n+ defaultAsyncPositiveEventTimeout = 10 * time.Second\n+\n+ // Extra time to use when waiting for an async event to not occur.\n+ //\n+ // Since a negative check is used to make sure an event did not happen, it is\n+ // okay to use a smaller timeout compared to the positive case since execution\n+ // stall in regards to the monotonic clock will not affect the expected\n+ // outcome.\n+ defaultAsyncNegativeEventTimeout = time.Second\n)\nvar (\n@@ -442,7 +451,7 @@ func TestDADResolve(t *testing.T) {\n// Make sure the address does not resolve before the resolution time has\n// passed.\n- time.Sleep(test.expectedRetransmitTimer*time.Duration(test.dupAddrDetectTransmits) - defaultAsyncEventTimeout)\n+ time.Sleep(test.expectedRetransmitTimer*time.Duration(test.dupAddrDetectTransmits) - defaultAsyncNegativeEventTimeout)\nif addr, err := s.GetMainNICAddress(nicID, header.IPv6ProtocolNumber); err != nil {\nt.Errorf(\"got stack.GetMainNICAddress(%d, %d) = (_, %s), want = (_, nil)\", nicID, header.IPv6ProtocolNumber, err)\n} else if want := (tcpip.AddressWithPrefix{}); addr != want {\n@@ -471,7 +480,7 @@ func TestDADResolve(t *testing.T) {\n// Wait for DAD to resolve.\nselect {\n- case <-time.After(2 * defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD resolution\")\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr1, true, nil); diff != \"\" {\n@@ -1169,7 +1178,7 @@ func TestRouterDiscoveryDispatcherNoRemember(t *testing.T) {\nselect {\ncase <-ndpDisp.routerC:\nt.Fatal(\"should not have received any router events\")\n- case <-time.After(lifetimeSeconds*time.Second + defaultTimeout):\n+ case <-time.After(lifetimeSeconds*time.Second + defaultAsyncNegativeEventTimeout):\n}\n}\n@@ -1252,7 +1261,7 @@ func TestRouterDiscovery(t *testing.T) {\n// Wait for the normal lifetime plus an extra bit for the\n// router to get invalidated. If we don't get an invalidation\n// event after this time, then something is wrong.\n- expectAsyncRouterInvalidationEvent(llAddr2, l2LifetimeSeconds*time.Second+defaultAsyncEventTimeout)\n+ expectAsyncRouterInvalidationEvent(llAddr2, l2LifetimeSeconds*time.Second+defaultAsyncPositiveEventTimeout)\n// Rx an RA from lladdr2 with huge lifetime.\ne.InjectInbound(header.IPv6ProtocolNumber, raBuf(llAddr2, 1000))\n@@ -1269,7 +1278,7 @@ func TestRouterDiscovery(t *testing.T) {\n// Wait for the normal lifetime plus an extra bit for the\n// router to get invalidated. 
If we don't get an invalidation\n// event after this time, then something is wrong.\n- expectAsyncRouterInvalidationEvent(llAddr3, l3LifetimeSeconds*time.Second+defaultAsyncEventTimeout)\n+ expectAsyncRouterInvalidationEvent(llAddr3, l3LifetimeSeconds*time.Second+defaultAsyncPositiveEventTimeout)\n}\n// TestRouterDiscoveryMaxRouters tests that only\n@@ -1418,7 +1427,7 @@ func TestPrefixDiscoveryDispatcherNoRemember(t *testing.T) {\nselect {\ncase <-ndpDisp.prefixC:\nt.Fatal(\"should not have received any prefix events\")\n- case <-time.After(lifetimeSeconds*time.Second + defaultTimeout):\n+ case <-time.After(lifetimeSeconds*time.Second + defaultAsyncNegativeEventTimeout):\n}\n}\n@@ -1500,7 +1509,7 @@ func TestPrefixDiscovery(t *testing.T) {\nif diff := checkPrefixEvent(e, subnet2, false); diff != \"\" {\nt.Errorf(\"prefix event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(time.Duration(lifetime)*time.Second + defaultAsyncEventTimeout):\n+ case <-time.After(time.Duration(lifetime)*time.Second + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for prefix discovery event\")\n}\n@@ -1565,7 +1574,7 @@ func TestPrefixDiscoveryWithInfiniteLifetime(t *testing.T) {\nselect {\ncase <-ndpDisp.prefixC:\nt.Fatal(\"unexpectedly invalidated a prefix with infinite lifetime\")\n- case <-time.After(testInfiniteLifetime + defaultTimeout):\n+ case <-time.After(testInfiniteLifetime + defaultAsyncNegativeEventTimeout):\n}\n// Receive an RA with finite lifetime.\n@@ -1590,7 +1599,7 @@ func TestPrefixDiscoveryWithInfiniteLifetime(t *testing.T) {\nselect {\ncase <-ndpDisp.prefixC:\nt.Fatal(\"unexpectedly invalidated a prefix with infinite lifetime\")\n- case <-time.After(testInfiniteLifetime + defaultTimeout):\n+ case <-time.After(testInfiniteLifetime + defaultAsyncNegativeEventTimeout):\n}\n// Receive an RA with a prefix with a lifetime value greater than the\n@@ -1599,7 +1608,7 @@ func TestPrefixDiscoveryWithInfiniteLifetime(t *testing.T) {\nselect {\ncase <-ndpDisp.prefixC:\nt.Fatal(\"unexpectedly invalidated a prefix with infinite lifetime\")\n- case <-time.After((testInfiniteLifetimeSeconds+1)*time.Second + defaultTimeout):\n+ case <-time.After((testInfiniteLifetimeSeconds+1)*time.Second + defaultAsyncNegativeEventTimeout):\n}\n// Receive an RA with 0 lifetime.\n@@ -1835,7 +1844,7 @@ func TestAutoGenAddr(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr1, invalidatedAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(newMinVLDuration + defaultAsyncEventTimeout):\n+ case <-time.After(newMinVLDuration + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\nif containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr1) {\n@@ -1962,7 +1971,7 @@ func TestAutoGenTempAddr(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr, eventType); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n}\n@@ -1975,7 +1984,7 @@ func TestAutoGenTempAddr(t *testing.T) {\nif diff := checkDADEvent(e, nicID, addr, true, nil); diff != \"\" {\nt.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + 
defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n}\n}\n@@ -2081,10 +2090,10 @@ func TestAutoGenTempAddr(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, nextAddr, invalidatedAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(defaultTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n- case <-time.After(newMinVLDuration + defaultTimeout):\n+ case <-time.After(newMinVLDuration + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\nif mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr2, tempAddr2}, []tcpip.AddressWithPrefix{addr1, tempAddr1}); mismatch != \"\" {\n@@ -2180,7 +2189,7 @@ func TestNoAutoGenTempAddrForLinkLocal(t *testing.T) {\nif diff := checkDADEvent(e, nicID, llAddr1, true, nil); diff != \"\" {\nt.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n}\n@@ -2188,7 +2197,7 @@ func TestNoAutoGenTempAddrForLinkLocal(t *testing.T) {\nselect {\ncase e := <-ndpDisp.autoGenAddrC:\nt.Errorf(\"got unxpected auto gen addr event = %+v\", e)\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncNegativeEventTimeout):\n}\n})\n}\n@@ -2265,7 +2274,7 @@ func TestNoAutoGenTempAddrWithoutStableAddr(t *testing.T) {\nif diff := checkDADEvent(e, nicID, addr.Address, true, nil); diff != \"\" {\nt.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(dadTransmits*retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n}\nselect {\n@@ -2273,7 +2282,7 @@ func TestNoAutoGenTempAddrWithoutStableAddr(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, tempAddr, newAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n}\n@@ -2363,13 +2372,13 @@ func TestAutoGenTempAddrRegen(t *testing.T) {\n}\n// Wait for regeneration\n- expectAutoGenAddrEventAsync(tempAddr2, newAddr, regenAfter+defaultAsyncEventTimeout)\n+ expectAutoGenAddrEventAsync(tempAddr2, newAddr, regenAfter+defaultAsyncPositiveEventTimeout)\nif mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr, tempAddr1, tempAddr2}, nil); mismatch != \"\" {\nt.Fatal(mismatch)\n}\n// Wait for regeneration\n- expectAutoGenAddrEventAsync(tempAddr3, newAddr, regenAfter+defaultAsyncEventTimeout)\n+ expectAutoGenAddrEventAsync(tempAddr3, newAddr, regenAfter+defaultAsyncPositiveEventTimeout)\nif mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, []tcpip.AddressWithPrefix{addr, tempAddr1, tempAddr2, tempAddr3}, nil); mismatch != \"\" {\nt.Fatal(mismatch)\n}\n@@ -2398,7 +2407,7 @@ func TestAutoGenTempAddrRegen(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case 
<-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n} else if diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff == \"\" {\n@@ -2407,12 +2416,12 @@ func TestAutoGenTempAddrRegen(t *testing.T) {\nselect {\ncase e := <-ndpDisp.autoGenAddrC:\nt.Fatalf(\"unexpectedly got an auto-generated event = %+v\", e)\n- case <-time.After(defaultTimeout):\n+ case <-time.After(defaultAsyncNegativeEventTimeout):\n}\n} else {\nt.Fatalf(\"got unexpected auto-generated event = %+v\", e)\n}\n- case <-time.After(invalidateAfter + defaultAsyncEventTimeout):\n+ case <-time.After(invalidateAfter + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n@@ -2517,7 +2526,7 @@ func TestAutoGenTempAddrRegenTimerUpdates(t *testing.T) {\nselect {\ncase e := <-ndpDisp.autoGenAddrC:\nt.Fatalf(\"unexpected auto gen addr event = %+v\", e)\n- case <-time.After(regenAfter + defaultAsyncEventTimeout):\n+ case <-time.After(regenAfter + defaultAsyncNegativeEventTimeout):\n}\n// Prefer the prefix again.\n@@ -2546,7 +2555,7 @@ func TestAutoGenTempAddrRegenTimerUpdates(t *testing.T) {\nselect {\ncase e := <-ndpDisp.autoGenAddrC:\nt.Fatalf(\"unexpected auto gen addr event = %+v\", e)\n- case <-time.After(regenAfter + defaultAsyncEventTimeout):\n+ case <-time.After(regenAfter + defaultAsyncNegativeEventTimeout):\n}\n// Set the maximum lifetimes for temporary addresses such that on the next\n@@ -2556,14 +2565,14 @@ func TestAutoGenTempAddrRegenTimerUpdates(t *testing.T) {\n// addresses + the time that has already passed since the last address was\n// generated so that the regeneration timer is needed to generate the next\n// address.\n- newLifetimes := newMinVLDuration + regenAfter + defaultAsyncEventTimeout\n+ newLifetimes := newMinVLDuration + regenAfter + defaultAsyncNegativeEventTimeout\nndpConfigs.MaxTempAddrValidLifetime = newLifetimes\nndpConfigs.MaxTempAddrPreferredLifetime = newLifetimes\nif err := s.SetNDPConfigurations(nicID, ndpConfigs); err != nil {\nt.Fatalf(\"s.SetNDPConfigurations(%d, _): %s\", nicID, err)\n}\ne.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, 100, 100))\n- expectAutoGenAddrEventAsync(tempAddr3, newAddr, regenAfter+defaultAsyncEventTimeout)\n+ expectAutoGenAddrEventAsync(tempAddr3, newAddr, regenAfter+defaultAsyncPositiveEventTimeout)\n}\n// TestMixedSLAACAddrConflictRegen tests SLAAC address regeneration in response\n@@ -2711,7 +2720,7 @@ func TestMixedSLAACAddrConflictRegen(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr, eventType); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n}\n@@ -2724,7 +2733,7 @@ func TestMixedSLAACAddrConflictRegen(t *testing.T) {\nif diff := checkDADEvent(e, nicID, addr, true, nil); diff != \"\" {\nt.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(dupAddrTransmits*retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(dupAddrTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n}\n}\n@@ -3070,7 +3079,7 @@ func TestAutoGenAddrTimerDeprecation(t *testing.T) {\nexpectPrimaryAddr(addr1)\n// Wait for addr of prefix1 to be deprecated.\n- expectAutoGenAddrEventAfter(addr1, deprecatedAddr, 
newMinVLDuration-time.Second+defaultAsyncEventTimeout)\n+ expectAutoGenAddrEventAfter(addr1, deprecatedAddr, newMinVLDuration-time.Second+defaultAsyncPositiveEventTimeout)\nif !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {\nt.Fatalf(\"should not have %s in the list of addresses\", addr1)\n}\n@@ -3110,7 +3119,7 @@ func TestAutoGenAddrTimerDeprecation(t *testing.T) {\nexpectPrimaryAddr(addr1)\n// Wait for addr of prefix1 to be deprecated.\n- expectAutoGenAddrEventAfter(addr1, deprecatedAddr, newMinVLDuration-time.Second+defaultAsyncEventTimeout)\n+ expectAutoGenAddrEventAfter(addr1, deprecatedAddr, newMinVLDuration-time.Second+defaultAsyncPositiveEventTimeout)\nif !containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {\nt.Fatalf(\"should not have %s in the list of addresses\", addr1)\n}\n@@ -3124,7 +3133,7 @@ func TestAutoGenAddrTimerDeprecation(t *testing.T) {\n}\n// Wait for addr of prefix1 to be invalidated.\n- expectAutoGenAddrEventAfter(addr1, invalidatedAddr, time.Second+defaultAsyncEventTimeout)\n+ expectAutoGenAddrEventAfter(addr1, invalidatedAddr, time.Second+defaultAsyncPositiveEventTimeout)\nif containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {\nt.Fatalf(\"should not have %s in the list of addresses\", addr1)\n}\n@@ -3156,7 +3165,7 @@ func TestAutoGenAddrTimerDeprecation(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr2, invalidatedAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n} else if diff := checkAutoGenAddrEvent(e, addr2, invalidatedAddr); diff == \"\" {\n@@ -3165,12 +3174,12 @@ func TestAutoGenAddrTimerDeprecation(t *testing.T) {\nselect {\ncase <-ndpDisp.autoGenAddrC:\nt.Fatal(\"unexpectedly got an auto-generated event\")\n- case <-time.After(defaultTimeout):\n+ case <-time.After(defaultAsyncNegativeEventTimeout):\n}\n} else {\nt.Fatalf(\"got unexpected auto-generated event\")\n}\n- case <-time.After(newMinVLDuration + defaultAsyncEventTimeout):\n+ case <-time.After(newMinVLDuration + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\nif containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {\n@@ -3295,7 +3304,7 @@ func TestAutoGenAddrFiniteToInfiniteToFiniteVL(t *testing.T) {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(minVLSeconds*time.Second + defaultAsyncEventTimeout):\n+ case <-time.After(minVLSeconds*time.Second + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timeout waiting for addr auto gen event\")\n}\n})\n@@ -3439,7 +3448,7 @@ func TestAutoGenAddrValidLifetimeUpdates(t *testing.T) {\nselect {\ncase <-ndpDisp.autoGenAddrC:\nt.Fatal(\"unexpectedly received an auto gen addr event\")\n- case <-time.After(time.Duration(test.evl)*time.Second - defaultAsyncEventTimeout):\n+ case <-time.After(time.Duration(test.evl)*time.Second - defaultAsyncNegativeEventTimeout):\n}\n// Wait for the invalidation event.\n@@ -3448,7 +3457,7 @@ func TestAutoGenAddrValidLifetimeUpdates(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(2 * defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timeout waiting for addr auto gen event\")\n}\n})\n@@ -3509,7 +3518,7 @@ func 
TestAutoGenAddrRemoval(t *testing.T) {\nselect {\ncase <-ndpDisp.autoGenAddrC:\nt.Fatal(\"unexpectedly received an auto gen addr event\")\n- case <-time.After(lifetimeSeconds*time.Second + defaultTimeout):\n+ case <-time.After(lifetimeSeconds*time.Second + defaultAsyncNegativeEventTimeout):\n}\n}\n@@ -3672,7 +3681,7 @@ func TestAutoGenAddrStaticConflict(t *testing.T) {\nselect {\ncase <-ndpDisp.autoGenAddrC:\nt.Fatal(\"unexpectedly received an auto gen addr event\")\n- case <-time.After(lifetimeSeconds*time.Second + defaultTimeout):\n+ case <-time.After(lifetimeSeconds*time.Second + defaultAsyncNegativeEventTimeout):\n}\nif !containsV6Addr(s.NICInfo()[1].ProtocolAddresses, addr) {\nt.Fatalf(\"Should have %s in the list of addresses\", addr1)\n@@ -3770,7 +3779,7 @@ func TestAutoGenAddrWithOpaqueIID(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr1, invalidatedAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(validLifetimeSecondPrefix1*time.Second + defaultAsyncEventTimeout):\n+ case <-time.After(validLifetimeSecondPrefix1*time.Second + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\nif containsV6Addr(s.NICInfo()[nicID].ProtocolAddresses, addr1) {\n@@ -3837,7 +3846,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr, eventType); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for addr auto gen event\")\n}\n}\n@@ -3863,7 +3872,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {\nif diff := checkDADEvent(e, nicID, addr, resolved, nil); diff != \"\" {\nt.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(dadTransmits*retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n}\n}\n@@ -4030,7 +4039,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {\nselect {\ncase e := <-ndpDisp.autoGenAddrC:\nt.Fatalf(\"unexpectedly got an auto-generated address event = %+v\", e)\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncNegativeEventTimeout):\n}\n})\n}\n@@ -4149,7 +4158,7 @@ func TestAutoGenAddrWithEUI64IIDNoDADRetries(t *testing.T) {\nselect {\ncase e := <-ndpDisp.autoGenAddrC:\nt.Fatalf(\"unexpectedly got an auto-generated address event = %+v\", e)\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncNegativeEventTimeout):\n}\n})\n}\n@@ -4251,7 +4260,7 @@ func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {\nif diff := checkDADEvent(e, nicID, addr.Address, true, nil); diff != \"\" {\nt.Errorf(\"dad event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(dadTransmits*retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD event\")\n}\n@@ -4277,7 +4286,7 @@ func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {\nif diff := checkAutoGenAddrEvent(e, addr, invalidatedAddr); diff != \"\" {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n- case <-time.After(defaultAsyncEventTimeout):\n+ case <-time.After(defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out 
waiting for invalidated auto gen addr event after deprecation\")\n}\n} else {\n@@ -4285,7 +4294,7 @@ func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {\nt.Errorf(\"auto-gen addr event mismatch (-want +got):\\n%s\", diff)\n}\n}\n- case <-time.After(lifetimeSeconds*time.Second - failureTimer - dadTransmits*retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(lifetimeSeconds*time.Second - failureTimer - dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for auto gen addr event\")\n}\n}\n@@ -4869,7 +4878,7 @@ func TestCleanupNDPState(t *testing.T) {\n// Should not get any more events (invalidation timers should have been\n// cancelled when the NDP state was cleaned up).\n- time.Sleep(lifetimeSeconds*time.Second + defaultTimeout)\n+ time.Sleep(lifetimeSeconds*time.Second + defaultAsyncNegativeEventTimeout)\nselect {\ncase <-ndpDisp.routerC:\nt.Error(\"unexpected router event\")\n@@ -5172,24 +5181,24 @@ func TestRouterSolicitation(t *testing.T) {\n// Make sure each RS is sent at the right time.\nremaining := test.maxRtrSolicit\nif remaining > 0 {\n- waitForPkt(test.effectiveMaxRtrSolicitDelay + defaultAsyncEventTimeout)\n+ waitForPkt(test.effectiveMaxRtrSolicitDelay + defaultAsyncPositiveEventTimeout)\nremaining--\n}\nfor ; remaining > 0; remaining-- {\n- if test.effectiveRtrSolicitInt > defaultAsyncEventTimeout {\n- waitForNothing(test.effectiveRtrSolicitInt - defaultAsyncEventTimeout)\n- waitForPkt(2 * defaultAsyncEventTimeout)\n+ if test.effectiveRtrSolicitInt > defaultAsyncPositiveEventTimeout {\n+ waitForNothing(test.effectiveRtrSolicitInt - defaultAsyncNegativeEventTimeout)\n+ waitForPkt(defaultAsyncPositiveEventTimeout)\n} else {\n- waitForPkt(test.effectiveRtrSolicitInt * defaultAsyncEventTimeout)\n+ waitForPkt(test.effectiveRtrSolicitInt + defaultAsyncPositiveEventTimeout)\n}\n}\n// Make sure no more RS.\nif test.effectiveRtrSolicitInt > test.effectiveMaxRtrSolicitDelay {\n- waitForNothing(test.effectiveRtrSolicitInt + defaultAsyncEventTimeout)\n+ waitForNothing(test.effectiveRtrSolicitInt + defaultAsyncNegativeEventTimeout)\n} else {\n- waitForNothing(test.effectiveMaxRtrSolicitDelay + defaultAsyncEventTimeout)\n+ waitForNothing(test.effectiveMaxRtrSolicitDelay + defaultAsyncNegativeEventTimeout)\n}\n// Make sure the counter got properly\n@@ -5305,11 +5314,11 @@ func TestStopStartSolicitingRouters(t *testing.T) {\n// Stop soliciting routers.\ntest.stopFn(t, s, true /* first */)\n- ctx, cancel := context.WithTimeout(context.Background(), delay+defaultAsyncEventTimeout)\n+ ctx, cancel := context.WithTimeout(context.Background(), delay+defaultAsyncNegativeEventTimeout)\ndefer cancel()\nif _, ok := e.ReadContext(ctx); ok {\n// A single RS may have been sent before solicitations were stopped.\n- ctx, cancel := context.WithTimeout(context.Background(), interval+defaultAsyncEventTimeout)\n+ ctx, cancel := context.WithTimeout(context.Background(), interval+defaultAsyncNegativeEventTimeout)\ndefer cancel()\nif _, ok = e.ReadContext(ctx); ok {\nt.Fatal(\"should not have sent more than one RS message\")\n@@ -5319,7 +5328,7 @@ func TestStopStartSolicitingRouters(t *testing.T) {\n// Stopping router solicitations after it has already been stopped should\n// do nothing.\ntest.stopFn(t, s, false /* first */)\n- ctx, cancel = context.WithTimeout(context.Background(), delay+defaultAsyncEventTimeout)\n+ ctx, cancel = context.WithTimeout(context.Background(), delay+defaultAsyncNegativeEventTimeout)\ndefer cancel()\nif _, ok := 
e.ReadContext(ctx); ok {\nt.Fatal(\"unexpectedly got a packet after router solicitation has been stopepd\")\n@@ -5332,10 +5341,10 @@ func TestStopStartSolicitingRouters(t *testing.T) {\n// Start soliciting routers.\ntest.startFn(t, s)\n- waitForPkt(delay + defaultAsyncEventTimeout)\n- waitForPkt(interval + defaultAsyncEventTimeout)\n- waitForPkt(interval + defaultAsyncEventTimeout)\n- ctx, cancel = context.WithTimeout(context.Background(), interval+defaultAsyncEventTimeout)\n+ waitForPkt(delay + defaultAsyncPositiveEventTimeout)\n+ waitForPkt(interval + defaultAsyncPositiveEventTimeout)\n+ waitForPkt(interval + defaultAsyncPositiveEventTimeout)\n+ ctx, cancel = context.WithTimeout(context.Background(), interval+defaultAsyncNegativeEventTimeout)\ndefer cancel()\nif _, ok := e.ReadContext(ctx); ok {\nt.Fatal(\"unexpectedly got an extra packet after sending out the expected RSs\")\n@@ -5344,7 +5353,7 @@ func TestStopStartSolicitingRouters(t *testing.T) {\n// Starting router solicitations after it has already completed should do\n// nothing.\ntest.startFn(t, s)\n- ctx, cancel = context.WithTimeout(context.Background(), delay+defaultAsyncEventTimeout)\n+ ctx, cancel = context.WithTimeout(context.Background(), delay+defaultAsyncNegativeEventTimeout)\ndefer cancel()\nif _, ok := e.ReadContext(ctx); ok {\nt.Fatal(\"unexpectedly got a packet after finishing router solicitations\")\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack_test.go", "new_path": "pkg/tcpip/stack/stack_test.go", "diff": "@@ -3305,7 +3305,7 @@ func TestDoDADWhenNICEnabled(t *testing.T) {\n// Wait for DAD to resolve.\nselect {\n- case <-time.After(dadTransmits*retransmitTimer + defaultAsyncEventTimeout):\n+ case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):\nt.Fatal(\"timed out waiting for DAD resolution\")\ncase e := <-ndpDisp.dadC:\nif diff := checkDADEvent(e, nicID, addr.AddressWithPrefix.Address, true, nil); diff != \"\" {\n" } ]
Go
Apache License 2.0
google/gvisor
Increase timeouts for NDP tests ... to help reduce flakes. When waiting for an event to occur, use a timeout of 10s. When waiting for an event to not occur, use a timeout of 1s. Test: Ran test locally w/ run count of 1000 with and without gotsan. PiperOrigin-RevId: 316998128
259,992
17.06.2020 18:41:55
25,200
97f6b20e89d99c4d92d6491ef3fad0933c9ba53d
Move mount configuration to RunOpts Separate mount configuration from links and move it to RunOpts, like the other options.
[ { "change_type": "MODIFY", "old_path": "pkg/test/dockerutil/dockerutil.go", "new_path": "pkg/test/dockerutil/dockerutil.go", "diff": "@@ -210,7 +210,6 @@ type Docker struct {\nRuntime string\nName string\ncopyErr error\n- mounts []string\ncleanups []func()\n}\n@@ -229,13 +228,8 @@ func MakeDocker(logger testutil.Logger) *Docker {\n}\n}\n-// Mount mounts the given source and makes it available in the container.\n-func (d *Docker) Mount(target, source string, mode MountMode) {\n- d.mounts = append(d.mounts, fmt.Sprintf(\"-v=%s:%s:%v\", source, target, mode))\n-}\n-\n// CopyFiles copies in and mounts the given files. They are always ReadOnly.\n-func (d *Docker) CopyFiles(target string, sources ...string) {\n+func (d *Docker) CopyFiles(opts *RunOpts, targetDir string, sources ...string) {\ndir, err := ioutil.TempDir(\"\", d.Name)\nif err != nil {\nd.copyErr = fmt.Errorf(\"ioutil.TempDir failed: %v\", err)\n@@ -259,12 +253,33 @@ func (d *Docker) CopyFiles(target string, sources ...string) {\n}\nd.logger.Logf(\"copy: %s -> %s\", src, dst)\n}\n- d.Mount(target, dir, ReadOnly)\n+ opts.Mounts = append(opts.Mounts, Mount{\n+ Source: dir,\n+ Target: targetDir,\n+ Mode: ReadOnly,\n+ })\n+}\n+\n+// Mount describes a mount point inside the container.\n+type Mount struct {\n+ // Source is the path outside the container.\n+ Source string\n+\n+ // Target is the path inside the container.\n+ Target string\n+\n+ // Mode tells whether the mount inside the container should be readonly.\n+ Mode MountMode\n}\n-// Link links the given target.\n-func (d *Docker) Link(target string, source *Docker) {\n- d.mounts = append(d.mounts, fmt.Sprintf(\"--link=%s:%s\", source.Name, target))\n+// Link informs dockers that a given container needs to be made accessible from\n+// the container being configured.\n+type Link struct {\n+ // Source is the container to connect to.\n+ Source *Docker\n+\n+ // Target is the alias for the container.\n+ Target string\n}\n// RunOpts are options for running a container.\n@@ -310,6 +325,12 @@ type RunOpts struct {\n// return value from the Run function.\nForeground bool\n+ // Mounts is the list of directories/files to be mounted inside the container.\n+ Mounts []Mount\n+\n+ // Links is the list of containers to be connected to the container.\n+ Links []Link\n+\n// Extra are extra arguments that may be passed.\nExtra []string\n}\n@@ -368,7 +389,13 @@ func (d *Docker) argsFor(r *RunOpts, command string, p []string) (rv []string) {\nif isExec {\nrv = append(rv, d.Name)\n} else {\n- rv = append(rv, d.mounts...)\n+ for _, m := range r.Mounts {\n+ rv = append(rv, fmt.Sprintf(\"-v=%s:%s:%v\", m.Source, m.Target, m.Mode))\n+ }\n+ for _, l := range r.Links {\n+ rv = append(rv, fmt.Sprintf(\"--link=%s:%s\", l.Source.Name, l.Target))\n+ }\n+\nif len(d.Runtime) > 0 {\nrv = append(rv, fmt.Sprintf(\"--runtime=%s\", d.Runtime))\n}\n@@ -501,8 +528,6 @@ func (d *Docker) CleanUp() {\nif err := d.Remove(); err != nil {\nd.logger.Logf(\"error removing container %q: %v\", d.Name, err)\n}\n- // Forget all mounts.\n- d.mounts = nil\n// Execute all cleanups.\nfor _, c := range d.cleanups {\nc()\n" }, { "change_type": "MODIFY", "old_path": "test/image/image_test.go", "new_path": "test/image/image_test.go", "diff": "@@ -111,11 +111,12 @@ func TestHttpd(t *testing.T) {\ndefer d.CleanUp()\n// Start the container.\n- d.CopyFiles(\"/usr/local/apache2/htdocs\", \"test/image/latin10k.txt\")\n- if err := d.Spawn(dockerutil.RunOpts{\n+ opts := dockerutil.RunOpts{\nImage: \"basic/httpd\",\nPorts: []int{80},\n- }); err != 
nil {\n+ }\n+ d.CopyFiles(&opts, \"/usr/local/apache2/htdocs\", \"test/image/latin10k.txt\")\n+ if err := d.Spawn(opts); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\n@@ -138,11 +139,12 @@ func TestNginx(t *testing.T) {\ndefer d.CleanUp()\n// Start the container.\n- d.CopyFiles(\"/usr/share/nginx/html\", \"test/image/latin10k.txt\")\n- if err := d.Spawn(dockerutil.RunOpts{\n+ opts := dockerutil.RunOpts{\nImage: \"basic/nginx\",\nPorts: []int{80},\n- }); err != nil {\n+ }\n+ d.CopyFiles(&opts, \"/usr/share/nginx/html\", \"test/image/latin10k.txt\")\n+ if err := d.Spawn(opts); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\n@@ -183,11 +185,17 @@ func TestMysql(t *testing.T) {\n// Tell mysql client to connect to the server and execute the file in\n// verbose mode to verify the output.\n- client.CopyFiles(\"/sql\", \"test/image/mysql.sql\")\n- client.Link(\"mysql\", server)\n- if _, err := client.Run(dockerutil.RunOpts{\n+ opts := dockerutil.RunOpts{\nImage: \"basic/mysql\",\n- }, \"mysql\", \"-hmysql\", \"-uroot\", \"-pfoobar123\", \"-v\", \"-e\", \"source /sql/mysql.sql\"); err != nil {\n+ Links: []dockerutil.Link{\n+ {\n+ Source: server,\n+ Target: \"mysql\",\n+ },\n+ },\n+ }\n+ client.CopyFiles(&opts, \"/sql\", \"test/image/mysql.sql\")\n+ if _, err := client.Run(opts, \"mysql\", \"-hmysql\", \"-uroot\", \"-pfoobar123\", \"-v\", \"-e\", \"source /sql/mysql.sql\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\n@@ -236,11 +244,12 @@ func TestRuby(t *testing.T) {\ndefer d.CleanUp()\n// Execute the ruby workload.\n- d.CopyFiles(\"/src\", \"test/image/ruby.rb\", \"test/image/ruby.sh\")\n- if err := d.Spawn(dockerutil.RunOpts{\n+ opts := dockerutil.RunOpts{\nImage: \"basic/ruby\",\nPorts: []int{8080},\n- }, \"/src/ruby.sh\"); err != nil {\n+ }\n+ d.CopyFiles(&opts, \"/src\", \"test/image/ruby.rb\", \"test/image/ruby.sh\")\n+ if err := d.Spawn(opts, \"/src/ruby.sh\"); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/iptables/iptables_test.go", "new_path": "test/iptables/iptables_test.go", "diff": "@@ -41,11 +41,12 @@ func singleTest(t *testing.T, test TestCase) {\ndefer d.CleanUp()\n// Create and start the container.\n- d.CopyFiles(\"/runner\", \"test/iptables/runner/runner\")\n- if err := d.Spawn(dockerutil.RunOpts{\n+ opts := dockerutil.RunOpts{\nImage: \"iptables\",\nCapAdd: []string{\"NET_ADMIN\"},\n- }, \"/runner/runner\", \"-name\", test.Name()); err != nil {\n+ }\n+ d.CopyFiles(&opts, \"/runner\", \"test/iptables/runner/runner\")\n+ if err := d.Spawn(opts, \"/runner/runner\", \"-name\", test.Name()); err != nil {\nt.Fatalf(\"docker run failed: %v\", err)\n}\n" }, { "change_type": "MODIFY", "old_path": "test/packetimpact/runner/packetimpact_test.go", "new_path": "test/packetimpact/runner/packetimpact_test.go", "diff": "@@ -142,7 +142,7 @@ func TestOne(t *testing.T) {\n}\nconst containerPosixServerBinary = \"/packetimpact/posix_server\"\n- dut.CopyFiles(\"/packetimpact\", \"/test/packetimpact/dut/posix_server\")\n+ dut.CopyFiles(&runOpts, \"/packetimpact\", \"/test/packetimpact/dut/posix_server\")\nif err := dut.Create(runOpts, containerPosixServerBinary, \"--ip=0.0.0.0\", \"--port=\"+ctrlPort); err != nil {\nt.Fatalf(\"unable to create container %s: %s\", dut.Name, err)\n@@ -193,7 +193,13 @@ func TestOne(t *testing.T) {\ntbb := path.Base(*testbenchBinary)\ncontainerTestbenchBinary := \"/packetimpact/\" + tbb\n- testbench.CopyFiles(\"/packetimpact\", \"/test/packetimpact/tests/\"+tbb)\n+ runOpts 
= dockerutil.RunOpts{\n+ Image: \"packetimpact\",\n+ CapAdd: []string{\"NET_ADMIN\"},\n+ Extra: []string{\"--sysctl\", \"net.ipv6.conf.all.disable_ipv6=0\", \"--rm\", \"-v\", tmpDir + \":\" + testOutputDir},\n+ Foreground: true,\n+ }\n+ testbench.CopyFiles(&runOpts, \"/packetimpact\", \"/test/packetimpact/tests/\"+tbb)\n// Run tcpdump in the test bench unbuffered, without DNS resolution, just on\n// the interface with the test packets.\n" }, { "change_type": "MODIFY", "old_path": "test/runtimes/runner/main.go", "new_path": "test/runtimes/runner/main.go", "diff": "@@ -79,10 +79,11 @@ func runTests() int {\n// getTests executes all tests as table tests.\nfunc getTests(d *dockerutil.Docker, excludes map[string]struct{}) ([]testing.InternalTest, error) {\n// Start the container.\n- d.CopyFiles(\"/proctor\", \"test/runtimes/proctor/proctor\")\n- if err := d.Spawn(dockerutil.RunOpts{\n+ opts := dockerutil.RunOpts{\nImage: fmt.Sprintf(\"runtimes/%s\", *image),\n- }, \"/proctor/proctor\", \"--pause\"); err != nil {\n+ }\n+ d.CopyFiles(&opts, \"/proctor\", \"test/runtimes/proctor/proctor\")\n+ if err := d.Spawn(opts, \"/proctor/proctor\", \"--pause\"); err != nil {\nreturn nil, fmt.Errorf(\"docker run failed: %v\", err)\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Move mount configutation to RunOpts Separate mount configuration from links and move it to RunOpts, like the other options. PiperOrigin-RevId: 317010158
259,992
17.06.2020 19:08:14
25,200
22b0bb21383614f6258dee27f4a254d2da97b586
Add TempTmpMount test This currently doesn't work with VSF2. Add test to ensure it's not missed. Updates
[ { "change_type": "MODIFY", "old_path": "test/e2e/integration_test.go", "new_path": "test/e2e/integration_test.go", "diff": "@@ -24,10 +24,12 @@ package integration\nimport (\n\"flag\"\n\"fmt\"\n+ \"io/ioutil\"\n\"net\"\n\"net/http\"\n\"os\"\n\"os/exec\"\n+ \"path/filepath\"\n\"strconv\"\n\"strings\"\n\"syscall\"\n@@ -389,6 +391,37 @@ func TestTmpFile(t *testing.T) {\n}\n}\n+// TestTmpMount checks that mounts inside '/tmp' are not overridden.\n+func TestTmpMount(t *testing.T) {\n+ dir, err := ioutil.TempDir(testutil.TmpDir(), \"tmp-mount\")\n+ if err != nil {\n+ t.Fatalf(\"TempDir(): %v\", err)\n+ }\n+ want := \"123\"\n+ if err := ioutil.WriteFile(filepath.Join(dir, \"file.txt\"), []byte(\"123\"), 0666); err != nil {\n+ t.Fatalf(\"WriteFile(): %v\", err)\n+ }\n+ d := dockerutil.MakeDocker(t)\n+ defer d.CleanUp()\n+\n+ opts := dockerutil.RunOpts{\n+ Image: \"basic/alpine\",\n+ Mounts: []dockerutil.Mount{\n+ {\n+ Source: dir,\n+ Target: \"/tmp/foo\",\n+ },\n+ },\n+ }\n+ got, err := d.Run(opts, \"cat\", \"/tmp/foo/file.txt\")\n+ if err != nil {\n+ t.Fatalf(\"docker run failed: %v\", err)\n+ }\n+ if want != got {\n+ t.Errorf(\"invalid file content, want: %q, got: %q\", want, got)\n+ }\n+}\n+\n// TestHostOverlayfsCopyUp tests that the --overlayfs-stale-read option causes\n// runsc to hide the incoherence of FDs opened before and after overlayfs\n// copy-up on the host.\n" } ]
Go
Apache License 2.0
google/gvisor
Add TempTmpMount test This currently doesn't work with VSF2. Add test to ensure it's not missed. Updates #1487 PiperOrigin-RevId: 317013792
259,992
17.06.2020 21:21:08
25,200
6e0c170522279ca72119b17c41e2e1dc93c49d6a
Implement Sync() to directories Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/directory.go", "new_path": "pkg/sentry/fsimpl/gofer/directory.go", "diff": "@@ -299,3 +299,8 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in\nreturn 0, syserror.EINVAL\n}\n}\n+\n+// Sync implements vfs.FileDescriptionImpl.Sync.\n+func (fd *directoryFD) Sync(ctx context.Context) error {\n+ return fd.dentry().handle.sync(ctx)\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -690,7 +690,8 @@ func (f *fileDescription) Seek(_ context.Context, offset int64, whence int32) (i\n// Sync implements FileDescriptionImpl.\nfunc (f *fileDescription) Sync(context.Context) error {\n- // TODO(gvisor.dev/issue/1672): Currently we do not support the SyncData optimization, so we always sync everything.\n+ // TODO(gvisor.dev/issue/1672): Currently we do not support the SyncData\n+ // optimization, so we always sync everything.\nreturn unix.Fsync(f.inode.hostFD)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/overlay/directory.go", "new_path": "pkg/sentry/fsimpl/overlay/directory.go", "diff": "@@ -263,3 +263,25 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in\nreturn 0, syserror.EINVAL\n}\n}\n+\n+// Sync implements vfs.FileDescriptionImpl.Sync. Forwards sync to the upper\n+// layer, if there is one. The lower layer doesn't need to sync because it\n+// never changes.\n+func (fd *directoryFD) Sync(ctx context.Context) error {\n+ d := fd.dentry()\n+ if !d.isCopiedUp() {\n+ return nil\n+ }\n+ vfsObj := d.fs.vfsfs.VirtualFilesystem()\n+ pop := vfs.PathOperation{\n+ Root: d.upperVD,\n+ Start: d.upperVD,\n+ }\n+ upperFD, err := vfsObj.OpenAt(ctx, d.fs.creds, &pop, &vfs.OpenOptions{Flags: linux.O_RDONLY | linux.O_DIRECTORY})\n+ if err != nil {\n+ return err\n+ }\n+ err = upperFD.Sync(ctx)\n+ upperFD.DecRef()\n+ return err\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go", "new_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go", "diff": "@@ -360,11 +360,6 @@ func (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (\nreturn offset, nil\n}\n-// Sync implements vfs.FileDescriptionImpl.Sync.\n-func (fd *regularFileFD) Sync(ctx context.Context) error {\n- return nil\n-}\n-\n// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.\nfunc (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {\nfile := fd.inode().impl.(*regularFile)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -778,3 +778,9 @@ func (fd *fileDescription) LockPOSIX(ctx context.Context, uid fslock.UniqueID, t\nfunc (fd *fileDescription) UnlockPOSIX(ctx context.Context, uid fslock.UniqueID, start, length uint64, whence int16) error {\nreturn fd.Locks().UnlockPOSIX(ctx, &fd.vfsfd, uid, start, length, whence)\n}\n+\n+// Sync implements vfs.FileDescriptionImpl.Sync. It does nothing because all\n+// filesystem state is in-memory.\n+func (*fileDescription) Sync(context.Context) error {\n+ return nil\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Implement Sync() to directories Updates #1035, #1199 PiperOrigin-RevId: 317028108
260,004
18.06.2020 00:08:55
25,200
09b2fca40c61f9ec8d6745f422f6f45b399e8f94
Cleanup tcp.timer and tcpip.Route When a tcp.timer or tcpip.Route is no longer used, clean up its resources so that unused memory may be released.
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/stack.go", "new_path": "pkg/tcpip/stack/stack.go", "diff": "@@ -1033,14 +1033,14 @@ func (s *Stack) removeNICLocked(id tcpip.NICID) *tcpip.Error {\n// Remove routes in-place. n tracks the number of routes written.\nn := 0\nfor i, r := range s.routeTable {\n+ s.routeTable[i] = tcpip.Route{}\nif r.NIC != id {\n// Keep this route.\n- if i > n {\ns.routeTable[n] = r\n- }\nn++\n}\n}\n+\ns.routeTable = s.routeTable[:n]\nreturn nic.remove()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/BUILD", "new_path": "pkg/tcpip/transport/tcp/BUILD", "diff": "@@ -76,7 +76,7 @@ go_library(\n)\ngo_test(\n- name = \"tcp_test\",\n+ name = \"tcp_x_test\",\nsize = \"medium\",\nsrcs = [\n\"dual_stack_test.go\",\n@@ -115,3 +115,11 @@ go_test(\n\"//pkg/tcpip/seqnum\",\n],\n)\n+\n+go_test(\n+ name = \"tcp_test\",\n+ size = \"small\",\n+ srcs = [\"timer_test.go\"],\n+ library = \":tcp\",\n+ deps = [\"//pkg/sleep\"],\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/transport/tcp/timer.go", "new_path": "pkg/tcpip/transport/tcp/timer.go", "diff": "@@ -85,6 +85,7 @@ func (t *timer) init(w *sleep.Waker) {\n// cleanup frees all resources associated with the timer.\nfunc (t *timer) cleanup() {\nt.timer.Stop()\n+ *t = timer{}\n}\n// checkExpiration checks if the given timer has actually expired, it should be\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/transport/tcp/timer_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package tcp\n+\n+import (\n+ \"testing\"\n+ \"time\"\n+\n+ \"gvisor.dev/gvisor/pkg/sleep\"\n+)\n+\n+func TestCleanup(t *testing.T) {\n+ const (\n+ timerDurationSeconds = 2\n+ isAssertedTimeoutSeconds = timerDurationSeconds + 1\n+ )\n+\n+ tmr := timer{}\n+ w := sleep.Waker{}\n+ tmr.init(&w)\n+ tmr.enable(timerDurationSeconds * time.Second)\n+ tmr.cleanup()\n+\n+ if want := (timer{}); tmr != want {\n+ t.Errorf(\"got tmr = %+v, want = %+v\", tmr, want)\n+ }\n+\n+ // The waker should not be asserted.\n+ for i := 0; i < isAssertedTimeoutSeconds; i++ {\n+ time.Sleep(time.Second)\n+ if w.IsAsserted() {\n+ t.Fatalf(\"waker asserted unexpectedly\")\n+ }\n+ }\n+}\n" } ]
Go
Apache License 2.0
google/gvisor
Cleanup tcp.timer and tcpip.Route When a tcp.timer or tcpip.Route is no longer used, clean up its resources so that unused memory may be released. PiperOrigin-RevId: 317046582
259,881
18.06.2020 09:02:14
25,200
3970c127434817304f67a2ad192cbe8094ad3353
Remove various uses of 'whitelist' Updates
[ { "change_type": "MODIFY", "old_path": "pkg/bpf/interpreter_test.go", "new_path": "pkg/bpf/interpreter_test.go", "diff": "@@ -767,7 +767,7 @@ func TestSimpleFilter(t *testing.T) {\nexpectedRet: 0,\n},\n{\n- desc: \"Whitelisted syscall is allowed\",\n+ desc: \"Allowed syscall is indeed allowed\",\nseccompData: seccompData{nr: 231 /* __NR_exit_group */, arch: 0xc000003e},\nexpectedRet: 0x7fff0000,\n},\n" }, { "change_type": "MODIFY", "old_path": "pkg/cpuid/cpuid_arm64.go", "new_path": "pkg/cpuid/cpuid_arm64.go", "diff": "@@ -312,8 +312,9 @@ func HostFeatureSet() *FeatureSet {\n}\n}\n-// Reads bogomips from host /proc/cpuinfo. Must run before whitelisting.\n-// This value is used to create the fake /proc/cpuinfo from a FeatureSet.\n+// Reads bogomips from host /proc/cpuinfo. Must run before syscall filter\n+// installation. This value is used to create the fake /proc/cpuinfo from a\n+// FeatureSet.\nfunc initCPUInfo() {\ncpuinfob, err := ioutil.ReadFile(\"/proc/cpuinfo\")\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/cpuid/cpuid_x86.go", "new_path": "pkg/cpuid/cpuid_x86.go", "diff": "@@ -1057,9 +1057,9 @@ func HostFeatureSet() *FeatureSet {\n}\n}\n-// Reads max cpu frequency from host /proc/cpuinfo. Must run before\n-// whitelisting. This value is used to create the fake /proc/cpuinfo from a\n-// FeatureSet.\n+// Reads max cpu frequency from host /proc/cpuinfo. Must run before syscall\n+// filter installation. This value is used to create the fake /proc/cpuinfo\n+// from a FeatureSet.\nfunc initCPUFreq() {\ncpuinfob, err := ioutil.ReadFile(\"/proc/cpuinfo\")\nif err != nil {\n@@ -1106,7 +1106,6 @@ func initFeaturesFromString() {\n}\nfunc init() {\n- // initCpuFreq must be run before whitelists are enabled.\ninitCPUFreq()\ninitFeaturesFromString()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/seccomp/seccomp_rules.go", "new_path": "pkg/seccomp/seccomp_rules.go", "diff": "@@ -56,7 +56,7 @@ func (a AllowValue) String() (s string) {\nreturn fmt.Sprintf(\"%#x \", uintptr(a))\n}\n-// Rule stores the whitelist of syscall arguments.\n+// Rule stores the allowed syscall arguments.\n//\n// For example:\n// rule := Rule {\n@@ -82,7 +82,7 @@ func (r Rule) String() (s string) {\nreturn\n}\n-// SyscallRules stores a map of OR'ed whitelist rules indexed by the syscall number.\n+// SyscallRules stores a map of OR'ed argument rules indexed by the syscall number.\n// If the 'Rules' is empty, we treat it as any argument is allowed.\n//\n// For example:\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fs/filesystems.go", "new_path": "pkg/sentry/fs/filesystems.go", "diff": "@@ -87,20 +87,6 @@ func RegisterFilesystem(f Filesystem) {\nfilesystems.registered[f.Name()] = f\n}\n-// UnregisterFilesystem removes a file system from the global set. To keep the\n-// file system set compatible with save/restore, UnregisterFilesystem must be\n-// called before save/restore methods.\n-//\n-// For instance, packages may unregister their file system after it is mounted.\n-// This makes sense for pseudo file systems that should not be visible or\n-// mountable. 
See whitelistfs in fs/host/fs.go for one example.\n-func UnregisterFilesystem(name string) {\n- filesystems.mu.Lock()\n- defer filesystems.mu.Unlock()\n-\n- delete(filesystems.registered, name)\n-}\n-\n// FindFilesystem returns a Filesystem registered at name or (nil, false) if name\n// is not a file system type that can be found in /proc/filesystems.\nfunc FindFilesystem(name string) (Filesystem, bool) {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -476,8 +476,9 @@ func (i *inode) open(ctx context.Context, d *vfs.Dentry, mnt *vfs.Mount, flags u\nreturn unixsocket.NewFileDescription(ep, ep.Type(), flags, mnt, d, &i.locks)\n}\n- // TODO(gvisor.dev/issue/1672): Whitelist specific file types here, so that\n- // we don't allow importing arbitrary file types without proper support.\n+ // TODO(gvisor.dev/issue/1672): Allow only specific file types here, so\n+ // that we don't allow importing arbitrary file types without proper\n+ // support.\nif i.isTTY {\nfd := &TTYFileDescription{\nfileDescription: fileDescription{inode: i},\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/hostinet/socket.go", "new_path": "pkg/sentry/socket/hostinet/socket.go", "diff": "@@ -324,7 +324,7 @@ func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr\nreturn nil, syserr.ErrInvalidArgument\n}\n- // Whitelist options and constrain option length.\n+ // Only allow known and safe options.\noptlen := getSockOptLen(t, level, name)\nswitch level {\ncase linux.SOL_IP:\n@@ -369,7 +369,7 @@ func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr\n// SetSockOpt implements socket.Socket.SetSockOpt.\nfunc (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error {\n- // Whitelist options and constrain option length.\n+ // Only allow known and safe options.\noptlen := setSockOptLen(t, level, name)\nswitch level {\ncase linux.SOL_IP:\n@@ -415,7 +415,7 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []\n// RecvMsg implements socket.Socket.RecvMsg.\nfunc (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlLen uint64) (int, int, linux.SockAddr, uint32, socket.ControlMessages, *syserr.Error) {\n- // Whitelist flags.\n+ // Only allow known and safe flags.\n//\n// FIXME(jamieliu): We can't support MSG_ERRQUEUE because it uses ancillary\n// messages that gvisor/pkg/tcpip/transport/unix doesn't understand. 
Kill the\n@@ -537,7 +537,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags\n// SendMsg implements socket.Socket.SendMsg.\nfunc (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, haveDeadline bool, deadline ktime.Time, controlMessages socket.ControlMessages) (int, *syserr.Error) {\n- // Whitelist flags.\n+ // Only allow known and safe flags.\nif flags&^(syscall.MSG_DONTWAIT|syscall.MSG_EOR|syscall.MSG_FASTOPEN|syscall.MSG_MORE|syscall.MSG_NOSIGNAL) != 0 {\nreturn 0, syserr.ErrInvalidArgument\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/README.md", "new_path": "pkg/sentry/vfs/README.md", "diff": "@@ -169,8 +169,6 @@ This construction, which is essentially a type-safe analogue to Linux's\n- binder, which is similarly far too incomplete to use.\n- - whitelistfs, which we are already actively attempting to remove.\n-\n- Save/restore. For instance, it is unclear if the current implementation of\nthe `state` package supports the inheritance pattern described above.\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/xattr.cc", "new_path": "test/syscalls/linux/xattr.cc", "diff": "@@ -73,9 +73,10 @@ TEST_F(XattrTest, XattrLargeName) {\nstd::string name = \"user.\";\nname += std::string(XATTR_NAME_MAX - name.length(), 'a');\n- // An xattr should be whitelisted before it can be accessed--do not allow\n- // arbitrary xattrs to be read/written in gVisor.\nif (!IsRunningOnGvisor()) {\n+ // In gVisor, access to xattrs is controlled with an explicit list of\n+ // allowed names. This name isn't going to be configured to allow access, so\n+ // don't test it.\nEXPECT_THAT(setxattr(path, name.c_str(), nullptr, 0, /*flags=*/0),\nSyscallSucceeds());\nEXPECT_THAT(getxattr(path, name.c_str(), nullptr, 0),\n" }, { "change_type": "MODIFY", "old_path": "tools/nogo/matchers.go", "new_path": "tools/nogo/matchers.go", "diff": "@@ -27,10 +27,15 @@ type matcher interface {\nShouldReport(d analysis.Diagnostic, fs *token.FileSet) bool\n}\n-// pathRegexps excludes explicit paths.\n+// pathRegexps filters explicit paths.\ntype pathRegexps struct {\nexpr []*regexp.Regexp\n- whitelist bool\n+\n+ // include, if true, indicates that paths matching any regexp in expr\n+ // match.\n+ //\n+ // If false, paths matching no regexps in expr match.\n+ include bool\n}\n// buildRegexps builds a list of regular expressions.\n@@ -49,17 +54,17 @@ func (p *pathRegexps) ShouldReport(d analysis.Diagnostic, fs *token.FileSet) boo\nfullPos := fs.Position(d.Pos).String()\nfor _, path := range p.expr {\nif path.MatchString(fullPos) {\n- return p.whitelist\n+ return p.include\n}\n}\n- return !p.whitelist\n+ return !p.include\n}\n// internalExcluded excludes specific internal paths.\nfunc internalExcluded(paths ...string) *pathRegexps {\nreturn &pathRegexps{\nexpr: buildRegexps(internalPrefix, paths...),\n- whitelist: false,\n+ include: false,\n}\n}\n@@ -67,7 +72,7 @@ func internalExcluded(paths ...string) *pathRegexps {\nfunc externalExcluded(paths ...string) *pathRegexps {\nreturn &pathRegexps{\nexpr: buildRegexps(externalPrefix, paths...),\n- whitelist: false,\n+ include: false,\n}\n}\n@@ -75,7 +80,7 @@ func externalExcluded(paths ...string) *pathRegexps {\nfunc internalMatches() *pathRegexps {\nreturn &pathRegexps{\nexpr: buildRegexps(internalPrefix, \".*\"),\n- whitelist: true,\n+ include: true,\n}\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Remove various uses of 'whitelist' Updates #2972 PiperOrigin-RevId: 317113059
259,891
18.06.2020 13:05:14
25,200
f1aa919181c066039a3022499de8076e523e3e7d
Ensure ip6tables module installed This module isn't always loaded automatically.
[ { "change_type": "MODIFY", "old_path": "scripts/iptables_tests.sh", "new_path": "scripts/iptables_tests.sh", "diff": "@@ -18,6 +18,9 @@ source $(dirname $0)/common.sh\nmake load-iptables\n+# Needed by ip6tables.\n+sudo modprobe ip6table_filter\n+\ninstall_runsc_for_test iptables --net-raw\ntest //test/iptables:iptables_test \"--test_arg=--runtime=runc\"\ntest //test/iptables:iptables_test \"--test_arg=--runtime=${RUNTIME}\"\n" } ]
Go
Apache License 2.0
google/gvisor
Ensure ip6tables module installed This module isn't always loaded automatically. PiperOrigin-RevId: 317164471
260,003
18.06.2020 14:28:06
25,200
f97122f44c5c4218bb2239686cd323358c661d60
Acquire lock when accessing MultiDevice's cache in String().
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/device/device.go", "new_path": "pkg/sentry/device/device.go", "diff": "@@ -188,6 +188,9 @@ type MultiDevice struct {\n// String stringifies MultiDevice.\nfunc (m *MultiDevice) String() string {\n+ m.mu.Lock()\n+ defer m.mu.Unlock()\n+\nbuf := bytes.NewBuffer(nil)\nbuf.WriteString(\"cache{\")\nfor k, v := range m.cache {\n" } ]
Go
Apache License 2.0
google/gvisor
Acquire lock when accessing MultiDevice's cache in String(). PiperOrigin-RevId: 317180925
259,992
18.06.2020 14:51:07
25,200
878050b5cf924b1f314965e5bfe21248a55616c4
Enable more VFS2 syscall tests Updates
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -33,6 +33,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:aio_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -106,6 +107,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:chroot_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -215,6 +217,7 @@ syscall_test(\nsize = \"medium\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:flock_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -350,6 +353,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:mount_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -900,6 +904,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:stat_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -916,6 +921,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:symlink_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" } ]
Go
Apache License 2.0
google/gvisor
Enable more VFS2 syscall tests Updates #2923 PiperOrigin-RevId: 317185798
259,891
18.06.2020 17:00:47
25,200
28b8a5cc3ac538333756084da28d7f13f13b5c87
iptables: remove metadata struct Metadata was useful for debugging and safety, but enough tests exist that we should see failures when (de)serialization is broken. It made stack initialization more cumbersome and it's also getting in the way of ip6tables.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netfilter/netfilter.go", "new_path": "pkg/sentry/socket/netfilter/netfilter.go", "diff": "@@ -41,19 +41,6 @@ const errorTargetName = \"ERROR\"\n// change the destination port/destination IP for packets.\nconst redirectTargetName = \"REDIRECT\"\n-// Metadata is used to verify that we are correctly serializing and\n-// deserializing iptables into structs consumable by the iptables tool. We save\n-// a metadata struct when the tables are written, and when they are read out we\n-// verify that certain fields are the same.\n-//\n-// metadata is used by this serialization/deserializing code, not netstack.\n-type metadata struct {\n- HookEntry [linux.NF_INET_NUMHOOKS]uint32\n- Underflow [linux.NF_INET_NUMHOOKS]uint32\n- NumEntries uint32\n- Size uint32\n-}\n-\n// enableLogging controls whether to log the (de)serialization of netfilter\n// structs between userspace and netstack. These logs are useful when\n// developing iptables, but can pollute sentry logs otherwise.\n@@ -83,29 +70,13 @@ func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr) (linux.IPT\nreturn linux.IPTGetinfo{}, syserr.FromError(err)\n}\n- // Find the appropriate table.\n- table, err := findTable(stack, info.Name)\n+ _, info, err := convertNetstackToBinary(stack, info.Name)\nif err != nil {\n- nflog(\"%v\", err)\n+ nflog(\"couldn't convert iptables: %v\", err)\nreturn linux.IPTGetinfo{}, syserr.ErrInvalidArgument\n}\n- // Get the hooks that apply to this table.\n- info.ValidHooks = table.ValidHooks()\n-\n- // Grab the metadata struct, which is used to store information (e.g.\n- // the number of entries) that applies to the user's encoding of\n- // iptables, but not netstack's.\n- metadata := table.Metadata().(metadata)\n-\n- // Set values from metadata.\n- info.HookEntry = metadata.HookEntry\n- info.Underflow = metadata.Underflow\n- info.NumEntries = metadata.NumEntries\n- info.Size = metadata.Size\n-\nnflog(\"returning info: %+v\", info)\n-\nreturn info, nil\n}\n@@ -118,23 +89,13 @@ func GetEntries(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen\nreturn linux.KernelIPTGetEntries{}, syserr.FromError(err)\n}\n- // Find the appropriate table.\n- table, err := findTable(stack, userEntries.Name)\n- if err != nil {\n- nflog(\"%v\", err)\n- return linux.KernelIPTGetEntries{}, syserr.ErrInvalidArgument\n- }\n-\n// Convert netstack's iptables rules to something that the iptables\n// tool can understand.\n- entries, meta, err := convertNetstackToBinary(userEntries.Name.String(), table)\n+ entries, _, err := convertNetstackToBinary(stack, userEntries.Name)\nif err != nil {\nnflog(\"couldn't read entries: %v\", err)\nreturn linux.KernelIPTGetEntries{}, syserr.ErrInvalidArgument\n}\n- if meta != table.Metadata().(metadata) {\n- panic(fmt.Sprintf(\"Table %q metadata changed between writing and reading. 
Was saved as %+v, but is now %+v\", userEntries.Name.String(), table.Metadata().(metadata), meta))\n- }\nif binary.Size(entries) > uintptr(outLen) {\nnflog(\"insufficient GetEntries output size: %d\", uintptr(outLen))\nreturn linux.KernelIPTGetEntries{}, syserr.ErrInvalidArgument\n@@ -143,44 +104,26 @@ func GetEntries(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen\nreturn entries, nil\n}\n-func findTable(stk *stack.Stack, tablename linux.TableName) (stack.Table, error) {\n- table, ok := stk.IPTables().GetTable(tablename.String())\n- if !ok {\n- return stack.Table{}, fmt.Errorf(\"couldn't find table %q\", tablename)\n- }\n- return table, nil\n-}\n-\n-// FillIPTablesMetadata populates stack's IPTables with metadata.\n-func FillIPTablesMetadata(stk *stack.Stack) {\n- stk.IPTables().ModifyTables(func(tables map[string]stack.Table) {\n- // In order to fill in the metadata, we have to translate ipt from its\n- // netstack format to Linux's giant-binary-blob format.\n- for name, table := range tables {\n- _, metadata, err := convertNetstackToBinary(name, table)\n- if err != nil {\n- panic(fmt.Errorf(\"Unable to set default IP tables: %v\", err))\n- }\n- table.SetMetadata(metadata)\n- tables[name] = table\n- }\n- })\n-}\n-\n// convertNetstackToBinary converts the iptables as stored in netstack to the\n// format expected by the iptables tool. Linux stores each table as a binary\n// blob that can only be traversed by parsing a bit, reading some offsets,\n// jumping to those offsets, parsing again, etc.\n-func convertNetstackToBinary(tablename string, table stack.Table) (linux.KernelIPTGetEntries, metadata, error) {\n- // Return values.\n+func convertNetstackToBinary(stack *stack.Stack, tablename linux.TableName) (linux.KernelIPTGetEntries, linux.IPTGetinfo, error) {\n+ table, ok := stack.IPTables().GetTable(tablename.String())\n+ if !ok {\n+ return linux.KernelIPTGetEntries{}, linux.IPTGetinfo{}, fmt.Errorf(\"couldn't find table %q\", tablename)\n+ }\n+\nvar entries linux.KernelIPTGetEntries\n- var meta metadata\n+ var info linux.IPTGetinfo\n+ info.ValidHooks = table.ValidHooks()\n// The table name has to fit in the struct.\nif linux.XT_TABLE_MAXNAMELEN < len(tablename) {\n- return linux.KernelIPTGetEntries{}, metadata{}, fmt.Errorf(\"table name %q too long.\", tablename)\n+ return linux.KernelIPTGetEntries{}, linux.IPTGetinfo{}, fmt.Errorf(\"table name %q too long\", tablename)\n}\n- copy(entries.Name[:], tablename)\n+ copy(info.Name[:], tablename[:])\n+ copy(entries.Name[:], tablename[:])\nfor ruleIdx, rule := range table.Rules {\nnflog(\"convert to binary: current offset: %d\", entries.Size)\n@@ -189,14 +132,14 @@ func convertNetstackToBinary(tablename string, table stack.Table) (linux.KernelI\nfor hook, hookRuleIdx := range table.BuiltinChains {\nif hookRuleIdx == ruleIdx {\nnflog(\"convert to binary: found hook %d at offset %d\", hook, entries.Size)\n- meta.HookEntry[hook] = entries.Size\n+ info.HookEntry[hook] = entries.Size\n}\n}\n// Is this a chain underflow point?\nfor underflow, underflowRuleIdx := range table.Underflows {\nif underflowRuleIdx == ruleIdx {\nnflog(\"convert to binary: found underflow %d at offset %d\", underflow, entries.Size)\n- meta.Underflow[underflow] = entries.Size\n+ info.Underflow[underflow] = entries.Size\n}\n}\n@@ -251,12 +194,12 @@ func convertNetstackToBinary(tablename string, table stack.Table) (linux.KernelI\nentries.Size += uint32(entry.NextOffset)\nentries.Entrytable = append(entries.Entrytable, entry)\n- meta.NumEntries++\n+ 
info.NumEntries++\n}\n- nflog(\"convert to binary: finished with an marshalled size of %d\", meta.Size)\n- meta.Size = entries.Size\n- return entries, meta, nil\n+ nflog(\"convert to binary: finished with an marshalled size of %d\", info.Size)\n+ info.Size = entries.Size\n+ return entries, info, nil\n}\nfunc marshalTarget(target stack.Target) []byte {\n@@ -569,12 +512,6 @@ func SetEntries(stk *stack.Stack, optVal []byte) *syserr.Error {\n// - There are no chains without an unconditional final rule.\n// - There are no chains without an unconditional underflow rule.\n- table.SetMetadata(metadata{\n- HookEntry: replace.HookEntry,\n- Underflow: replace.Underflow,\n- NumEntries: replace.NumEntries,\n- Size: replace.Size,\n- })\nstk.IPTables().ReplaceTable(replace.Name.String(), table)\nreturn nil\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/socket/netstack/stack.go", "new_path": "pkg/sentry/socket/netstack/stack.go", "diff": "@@ -18,7 +18,6 @@ import (\n\"gvisor.dev/gvisor/pkg/abi/linux\"\n\"gvisor.dev/gvisor/pkg/log\"\n\"gvisor.dev/gvisor/pkg/sentry/inet\"\n- \"gvisor.dev/gvisor/pkg/sentry/socket/netfilter\"\n\"gvisor.dev/gvisor/pkg/syserr\"\n\"gvisor.dev/gvisor/pkg/syserror\"\n\"gvisor.dev/gvisor/pkg/tcpip\"\n@@ -366,11 +365,6 @@ func (s *Stack) IPTables() (*stack.IPTables, error) {\nreturn s.Stack.IPTables(), nil\n}\n-// FillIPTablesMetadata populates stack's IPTables with metadata.\n-func (s *Stack) FillIPTablesMetadata() {\n- netfilter.FillIPTablesMetadata(s.Stack)\n-}\n-\n// Resume implements inet.Stack.Resume.\nfunc (s *Stack) Resume() {\ns.Stack.Resume()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables.go", "new_path": "pkg/tcpip/stack/iptables.go", "diff": "@@ -173,14 +173,6 @@ func (it *IPTables) ReplaceTable(name string, table Table) {\nit.tables[name] = table\n}\n-// ModifyTables acquires write-lock and calls fn with internal name-to-table\n-// map. This function can be used to update multiple tables atomically.\n-func (it *IPTables) ModifyTables(fn func(map[string]Table)) {\n- it.mu.Lock()\n- defer it.mu.Unlock()\n- fn(it.tables)\n-}\n-\n// GetPriorities returns slice of priorities associated with hook.\nfunc (it *IPTables) GetPriorities(hook Hook) []string {\nit.mu.RLock()\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables_types.go", "new_path": "pkg/tcpip/stack/iptables_types.go", "diff": "@@ -95,7 +95,7 @@ type IPTables struct {\n}\n// A Table defines a set of chains and hooks into the network stack. It is\n-// really just a list of rules with some metadata for entrypoints and such.\n+// really just a list of rules.\ntype Table struct {\n// Rules holds the rules that make up the table.\nRules []Rule\n@@ -110,10 +110,6 @@ type Table struct {\n// UserChains holds user-defined chains for the keyed by name. Users\n// can give their chains arbitrary names.\nUserChains map[string]int\n-\n- // Metadata holds information about the Table that is useful to users\n- // of IPTables, but not to the netstack IPTables code itself.\n- metadata interface{}\n}\n// ValidHooks returns a bitmap of the builtin hooks for the given table.\n@@ -125,16 +121,6 @@ func (table *Table) ValidHooks() uint32 {\nreturn hooks\n}\n-// Metadata returns the metadata object stored in table.\n-func (table *Table) Metadata() interface{} {\n- return table.metadata\n-}\n-\n-// SetMetadata sets the metadata object stored in table.\n-func (table *Table) SetMetadata(metadata interface{}) {\n- table.metadata = metadata\n-}\n-\n// A Rule is a packet processing rule. 
It consists of two pieces. First it\n// contains zero or more matchers, each of which is a specification of which\n// packets this rule applies to. If there are no matchers in the rule, it\n" }, { "change_type": "MODIFY", "old_path": "runsc/boot/loader.go", "new_path": "runsc/boot/loader.go", "diff": "@@ -1071,8 +1071,6 @@ func newEmptySandboxNetworkStack(clock tcpip.Clock, uniqueID stack.UniqueID) (in\nreturn nil, fmt.Errorf(\"SetTransportProtocolOption failed: %s\", err)\n}\n- s.FillIPTablesMetadata()\n-\nreturn &s, nil\n}\n" } ]
Go
Apache License 2.0
google/gvisor
iptables: remove metadata struct Metadata was useful for debugging and safety, but enough tests exist that we should see failures when (de)serialization is broken. It made stack initialization more cumbersome and it's also getting in the way of ip6tables. PiperOrigin-RevId: 317210653
259,891
18.06.2020 19:45:13
25,200
0c169b6ad598200a57db7bf0f679da1d6cb395c4
iptables: skip iptables if no rules are set Users that never set iptables rules shouldn't incur the iptables performance cost. Suggested by Ian (@iangudger).
[ { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables.go", "new_path": "pkg/tcpip/stack/iptables.go", "diff": "@@ -170,6 +170,7 @@ func (it *IPTables) GetTable(name string) (Table, bool) {\nfunc (it *IPTables) ReplaceTable(name string, table Table) {\nit.mu.Lock()\ndefer it.mu.Unlock()\n+ it.modified = true\nit.tables[name] = table\n}\n@@ -201,6 +202,15 @@ const (\n//\n// Precondition: pkt.NetworkHeader is set.\nfunc (it *IPTables) Check(hook Hook, pkt *PacketBuffer, gso *GSO, r *Route, address tcpip.Address, nicName string) bool {\n+ // Many users never configure iptables. Spare them the cost of rule\n+ // traversal if rules have never been set.\n+ it.mu.RLock()\n+ if !it.modified {\n+ it.mu.RUnlock()\n+ return true\n+ }\n+ it.mu.RUnlock()\n+\n// Packets are manipulated only if connection and matching\n// NAT rule exists.\nit.connections.HandlePacket(pkt, hook, gso, r)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/stack/iptables_types.go", "new_path": "pkg/tcpip/stack/iptables_types.go", "diff": "@@ -79,11 +79,11 @@ const (\n// IPTables holds all the tables for a netstack.\ntype IPTables struct {\n- // mu protects tables and priorities.\n+ // mu protects tables, priorities, and modified.\nmu sync.RWMutex\n- // tables maps table names to tables. User tables have arbitrary names. mu\n- // needs to be locked for accessing.\n+ // tables maps table names to tables. User tables have arbitrary names.\n+ // mu needs to be locked for accessing.\ntables map[string]Table\n// priorities maps each hook to a list of table names. The order of the\n@@ -91,6 +91,11 @@ type IPTables struct {\n// hook. mu needs to be locked for accessing.\npriorities map[Hook][]string\n+ // modified is whether tables have been modified at least once. It is\n+ // used to elide the iptables performance overhead for workloads that\n+ // don't utilize iptables.\n+ modified bool\n+\nconnections ConnTrackTable\n}\n" } ]
Go
Apache License 2.0
google/gvisor
iptables: skip iptables if no rules are set Users that never set iptables rules shouldn't incur the iptables performance cost. Suggested by Ian (@iangudger). PiperOrigin-RevId: 317232921
259,860
18.06.2020 22:00:56
25,200
408f3d2cd64cae6b2f76a940c76236e9841c095f
Fix vfs2 tmpfs link permission checks. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -237,18 +237,22 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\nreturn syserror.EXDEV\n}\nd := vd.Dentry().Impl().(*dentry)\n- if d.inode.isDir() {\n+ i := d.inode\n+ if i.isDir() {\nreturn syserror.EPERM\n}\n- if d.inode.nlink == 0 {\n+ if err := vfs.MayLink(auth.CredentialsFromContext(ctx), linux.FileMode(atomic.LoadUint32(&i.mode)), auth.KUID(atomic.LoadUint32(&i.uid)), auth.KGID(atomic.LoadUint32(&i.gid))); err != nil {\n+ return err\n+ }\n+ if i.nlink == 0 {\nreturn syserror.ENOENT\n}\n- if d.inode.nlink == maxLinks {\n+ if i.nlink == maxLinks {\nreturn syserror.EMLINK\n}\n- d.inode.incLinksLocked()\n- d.inode.watches.Notify(\"\", linux.IN_ATTRIB, 0, vfs.InodeEvent)\n- parentDir.insertChildLocked(fs.newDentry(d.inode), name)\n+ i.incLinksLocked()\n+ i.watches.Notify(\"\", linux.IN_ATTRIB, 0, vfs.InodeEvent)\n+ parentDir.insertChildLocked(fs.newDentry(i), name)\nreturn nil\n})\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/permissions.go", "new_path": "pkg/sentry/vfs/permissions.go", "diff": "@@ -94,6 +94,37 @@ func GenericCheckPermissions(creds *auth.Credentials, ats AccessTypes, mode linu\nreturn syserror.EACCES\n}\n+// MayLink determines whether creating a hard link to a file with the given\n+// mode, kuid, and kgid is permitted.\n+//\n+// This corresponds to Linux's fs/namei.c:may_linkat.\n+func MayLink(creds *auth.Credentials, mode linux.FileMode, kuid auth.KUID, kgid auth.KGID) error {\n+ // Source inode owner can hardlink all they like; otherwise, it must be a\n+ // safe source.\n+ if CanActAsOwner(creds, kuid) {\n+ return nil\n+ }\n+\n+ // Only regular files can be hard linked.\n+ if mode.FileType() != linux.S_IFREG {\n+ return syserror.EPERM\n+ }\n+\n+ // Setuid files should not get pinned to the filesystem.\n+ if mode&linux.S_ISUID != 0 {\n+ return syserror.EPERM\n+ }\n+\n+ // Executable setgid files should not get pinned to the filesystem, but we\n+ // don't support S_IXGRP anyway.\n+\n+ // Hardlinking to unreadable or unwritable sources is dangerous.\n+ if err := GenericCheckPermissions(creds, MayRead|MayWrite, mode, kuid, kgid); err != nil {\n+ return syserror.EPERM\n+ }\n+ return nil\n+}\n+\n// AccessTypesForOpenFlags returns the access types required to open a file\n// with the given OpenOptions.Flags. 
Note that this is NOT the same thing as\n// the set of accesses permitted for the opened file:\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -305,6 +305,7 @@ syscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:link_test\",\nuse_tmpfs = True, # gofer needs CAP_DAC_READ_SEARCH to use AT_EMPTY_PATH with linkat(2)\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/link.cc", "new_path": "test/syscalls/linux/link.cc", "diff": "@@ -79,8 +79,13 @@ TEST(LinkTest, PermissionDenied) {\n// Make the file \"unsafe\" to link by making it only readable, but not\n// writable.\n- const auto oldfile =\n+ const auto unwriteable_file =\nASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0400));\n+ const std::string special_path = NewTempAbsPath();\n+ ASSERT_THAT(mkfifo(special_path.c_str(), 0666), SyscallSucceeds());\n+ const auto setuid_file =\n+ ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileMode(0666 | S_ISUID));\n+\nconst std::string newname = NewTempAbsPath();\n// Do setuid in a separate thread so that after finishing this test, the\n@@ -97,8 +102,14 @@ TEST(LinkTest, PermissionDenied) {\nEXPECT_THAT(syscall(SYS_setuid, absl::GetFlag(FLAGS_scratch_uid)),\nSyscallSucceeds());\n- EXPECT_THAT(link(oldfile.path().c_str(), newname.c_str()),\n+ EXPECT_THAT(link(unwriteable_file.path().c_str(), newname.c_str()),\n+ SyscallFailsWithErrno(EPERM));\n+ EXPECT_THAT(link(special_path.c_str(), newname.c_str()),\nSyscallFailsWithErrno(EPERM));\n+ if (!IsRunningWithVFS1()) {\n+ EXPECT_THAT(link(setuid_file.path().c_str(), newname.c_str()),\n+ SyscallFailsWithErrno(EPERM));\n+ }\n});\n}\n" }, { "change_type": "MODIFY", "old_path": "test/util/test_util.h", "new_path": "test/util/test_util.h", "diff": "@@ -220,6 +220,7 @@ constexpr char kKVM[] = \"kvm\";\nbool IsRunningOnGvisor();\nconst std::string GvisorPlatform();\nbool IsRunningWithHostinet();\n+// TODO(gvisor.dev/issue/1624): Delete once VFS1 is gone.\nbool IsRunningWithVFS1();\n#ifdef __linux__\n" } ]
Go
Apache License 2.0
google/gvisor
Fix vfs2 tmpfs link permission checks. Updates #2923. PiperOrigin-RevId: 317246916
259,860
19.06.2020 05:55:35
25,200
46957ed24f21396683cf9aff13fa0cd3086ea466
Fix synthetic file bugs in gofer fs. Always check if a synthetic file already exists at a location before creating a file there, and do not try to delete synthetic gofer files from the remote fs. This fixes runsc_ptrace socket tests that create/unlink synthetic, named socket files. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -374,13 +374,16 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nreturn nil\n}\nif fs.opts.interop == InteropModeShared {\n- // The existence of a dentry at name would be inconclusive because the\n- // file it represents may have been deleted from the remote filesystem,\n- // so we would need to make an RPC to revalidate the dentry. Just\n- // attempt the file creation RPC instead. If a file does exist, the RPC\n- // will fail with EEXIST like we would have. If the RPC succeeds, and a\n- // stale dentry exists, the dentry will fail revalidation next time\n- // it's used.\n+ if child := parent.children[name]; child != nil && child.isSynthetic() {\n+ return syserror.EEXIST\n+ }\n+ // The existence of a non-synthetic dentry at name would be inconclusive\n+ // because the file it represents may have been deleted from the remote\n+ // filesystem, so we would need to make an RPC to revalidate the dentry.\n+ // Just attempt the file creation RPC instead. If a file does exist, the\n+ // RPC will fail with EEXIST like we would have. If the RPC succeeds, and a\n+ // stale dentry exists, the dentry will fail revalidation next time it's\n+ // used.\nreturn createInRemoteDir(parent, name)\n}\nif child := parent.children[name]; child != nil {\n@@ -518,7 +521,7 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b\nif child == nil {\nreturn syserror.ENOENT\n}\n- } else {\n+ } else if child == nil || !child.isSynthetic() {\nerr = parent.file.unlinkAt(ctx, name, flags)\nif err != nil {\nif child != nil {\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -57,6 +57,7 @@ syscall_test(\nsize = \"large\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:bind_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -700,6 +701,7 @@ syscall_test(\nsize = \"medium\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:socket_filesystem_non_blocking_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -707,12 +709,14 @@ syscall_test(\nadd_overlay = True,\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_filesystem_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_inet_loopback_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -721,6 +725,7 @@ syscall_test(\n# Takes too long for TSAN. 
Creates a lot of TCP sockets.\ntags = [\"nogotsan\"],\ntest = \"//test/syscalls/linux:socket_inet_loopback_nogotsan_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -796,6 +801,7 @@ syscall_test(\nsyscall_test(\ntest = \"//test/syscalls/linux:socket_blocking_local_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -805,6 +811,7 @@ syscall_test(\nsyscall_test(\ntest = \"//test/syscalls/linux:socket_non_stream_blocking_local_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -815,6 +822,7 @@ syscall_test(\nsyscall_test(\nsize = \"large\",\ntest = \"//test/syscalls/linux:socket_stream_blocking_local_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -826,11 +834,13 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_stream_local_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_stream_nonblock_local_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -838,6 +848,7 @@ syscall_test(\nsize = \"enormous\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:socket_unix_dgram_local_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -859,11 +870,13 @@ syscall_test(\nsize = \"enormous\",\nshard_count = 5,\ntest = \"//test/syscalls/linux:socket_unix_seqpacket_local_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_unix_stream_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -881,6 +894,7 @@ syscall_test(\nsyscall_test(\nsize = \"medium\",\ntest = \"//test/syscalls/linux:socket_unix_unbound_filesystem_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -894,6 +908,7 @@ syscall_test(\nsize = \"large\",\nshard_count = 50,\ntest = \"//test/syscalls/linux:socket_unix_unbound_stream_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" } ]
Go
Apache License 2.0
google/gvisor
Fix synthetic file bugs in gofer fs. Always check if a synthetic file already exists at a location before creating a file there, and do not try to delete synthetic gofer files from the remote fs. This fixes runsc_ptrace socket tests that create/unlink synthetic, named socket files. Updates #2923. PiperOrigin-RevId: 317293648
259,860
19.06.2020 06:37:40
25,200
6b69b955d7613ff391984661a7269eabc86020e3
Support all seek options in gofer specialFileFD.Seek. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "new_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "diff": "@@ -489,15 +489,24 @@ func (d *dentry) writeback(ctx context.Context, offset, size int64) error {\nfunc (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {\nfd.mu.Lock()\ndefer fd.mu.Unlock()\n+ newOffset, err := regularFileSeekLocked(ctx, fd.dentry(), fd.off, offset, whence)\n+ if err != nil {\n+ return 0, err\n+ }\n+ fd.off = newOffset\n+ return newOffset, nil\n+}\n+\n+// Calculate the new offset for a seek operation on a regular file.\n+func regularFileSeekLocked(ctx context.Context, d *dentry, fdOffset, offset int64, whence int32) (int64, error) {\nswitch whence {\ncase linux.SEEK_SET:\n// Use offset as specified.\ncase linux.SEEK_CUR:\n- offset += fd.off\n+ offset += fdOffset\ncase linux.SEEK_END, linux.SEEK_DATA, linux.SEEK_HOLE:\n// Ensure file size is up to date.\n- d := fd.dentry()\n- if fd.filesystem().opts.interop == InteropModeShared {\n+ if !d.cachedMetadataAuthoritative() {\nif err := d.updateFromGetattr(ctx); err != nil {\nreturn 0, err\n}\n@@ -525,7 +534,6 @@ func (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (\nif offset < 0 {\nreturn 0, syserror.EINVAL\n}\n- fd.off = offset\nreturn offset, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/special_file.go", "new_path": "pkg/sentry/fsimpl/gofer/special_file.go", "diff": "@@ -221,21 +221,12 @@ func (fd *specialFileFD) Seek(ctx context.Context, offset int64, whence int32) (\n}\nfd.mu.Lock()\ndefer fd.mu.Unlock()\n- switch whence {\n- case linux.SEEK_SET:\n- // Use offset as given.\n- case linux.SEEK_CUR:\n- offset += fd.off\n- default:\n- // SEEK_END, SEEK_DATA, and SEEK_HOLE aren't supported since it's not\n- // clear that file size is even meaningful for these files.\n- return 0, syserror.EINVAL\n- }\n- if offset < 0 {\n- return 0, syserror.EINVAL\n+ newOffset, err := regularFileSeekLocked(ctx, fd.dentry(), fd.off, offset, whence)\n+ if err != nil {\n+ return 0, err\n}\n- fd.off = offset\n- return offset, nil\n+ fd.off = newOffset\n+ return newOffset, nil\n}\n// Sync implements vfs.FileDescriptionImpl.Sync.\n" } ]
Go
Apache License 2.0
google/gvisor
Support all seek options in gofer specialFileFD.Seek. Updates #2923. PiperOrigin-RevId: 317298186
259,860
19.06.2020 08:44:26
25,200
f40d023ad6f8c19898ca105842a88961b3c2994c
Don't adjust parent link count if we replace a child dir with another. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -1196,7 +1196,8 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa\nif newParent.cachedMetadataAuthoritative() {\nnewParent.dirents = nil\nnewParent.touchCMtime()\n- if renamed.isDir() {\n+ if renamed.isDir() && (replaced == nil || !replaced.isDir()) {\n+ // Increase the link count if we did not replace another directory.\nnewParent.incLinks()\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -572,6 +572,7 @@ syscall_test(\nsize = \"medium\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:rename_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" } ]
Go
Apache License 2.0
google/gvisor
Don't adjust parent link count if we replace a child dir with another. Updates #2923. PiperOrigin-RevId: 317314460
259,860
19.06.2020 10:18:35
25,200
a609fff9d1516a095341c2016ec36f952550a46f
Fix vfs2 handling of preadv2/pwritev2 flags. Check for unsupported flags, and silently support RWF_HIPRI by doing nothing. From pkg/abi/linux/file.go: "gVisor does not implement the RWF_HIPRI feature, but the flag is accepted as a valid flag argument for preadv2/pwritev2." Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "new_path": "pkg/sentry/fsimpl/gofer/regular_file.go", "diff": "@@ -72,7 +72,9 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs\nif offset < 0 {\nreturn 0, syserror.EINVAL\n}\n- if opts.Flags != 0 {\n+\n+ // Check that flags are supported. Silently ignore RWF_HIPRI.\n+ if opts.Flags&^linux.RWF_HIPRI != 0 {\nreturn 0, syserror.EOPNOTSUPP\n}\n@@ -123,9 +125,12 @@ func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off\nif offset < 0 {\nreturn 0, syserror.EINVAL\n}\n- if opts.Flags != 0 {\n+\n+ // Check that flags are supported. Silently ignore RWF_HIPRI.\n+ if opts.Flags&^linux.RWF_HIPRI != 0 {\nreturn 0, syserror.EOPNOTSUPP\n}\n+\nlimit, err := vfs.CheckLimit(ctx, offset, src.NumBytes())\nif err != nil {\nreturn 0, err\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/special_file.go", "new_path": "pkg/sentry/fsimpl/gofer/special_file.go", "diff": "@@ -129,7 +129,9 @@ func (fd *specialFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs\nif fd.seekable && offset < 0 {\nreturn 0, syserror.EINVAL\n}\n- if opts.Flags != 0 {\n+\n+ // Check that flags are supported. Silently ignore RWF_HIPRI.\n+ if opts.Flags&^linux.RWF_HIPRI != 0 {\nreturn 0, syserror.EOPNOTSUPP\n}\n@@ -173,7 +175,9 @@ func (fd *specialFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off\nif fd.seekable && offset < 0 {\nreturn 0, syserror.EINVAL\n}\n- if opts.Flags != 0 {\n+\n+ // Check that flags are supported. Silently ignore RWF_HIPRI.\n+ if opts.Flags&^linux.RWF_HIPRI != 0 {\nreturn 0, syserror.EOPNOTSUPP\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go", "new_path": "pkg/sentry/fsimpl/tmpfs/regular_file.go", "diff": "@@ -279,6 +279,12 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs\nif offset < 0 {\nreturn 0, syserror.EINVAL\n}\n+\n+ // Check that flags are supported. Silently ignore RWF_HIPRI.\n+ if opts.Flags&^linux.RWF_HIPRI != 0 {\n+ return 0, syserror.EOPNOTSUPP\n+ }\n+\nif dst.NumBytes() == 0 {\nreturn 0, nil\n}\n@@ -304,6 +310,12 @@ func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off\nif offset < 0 {\nreturn 0, syserror.EINVAL\n}\n+\n+ // Check that flags are supported. Silently ignore RWF_HIPRI.\n+ if opts.Flags&^linux.RWF_HIPRI != 0 {\n+ return 0, syserror.EOPNOTSUPP\n+ }\n+\nsrclen := src.NumBytes()\nif srclen == 0 {\nreturn 0, nil\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -463,6 +463,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:preadv2_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" } ]
Go
Apache License 2.0
google/gvisor
Fix vfs2 handling of preadv2/pwritev2 flags. Check for unsupported flags, and silently support RWF_HIPRI by doing nothing. From pkg/abi/linux/file.go: "gVisor does not implement the RWF_HIPRI feature, but the flag is accepted as a valid flag argument for preadv2/pwritev2." Updates #2923. PiperOrigin-RevId: 317330631
259,860
19.06.2020 11:48:24
25,200
7db196c4dbb33562dbfaa28a9476c590f356b2b5
Port fadvise64 to vfs2. Like vfs1, we have a trivial implementation that ignores all valid advice. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/abi/linux/BUILD", "new_path": "pkg/abi/linux/BUILD", "diff": "@@ -23,6 +23,7 @@ go_library(\n\"errors.go\",\n\"eventfd.go\",\n\"exec.go\",\n+ \"fadvise.go\",\n\"fcntl.go\",\n\"file.go\",\n\"file_amd64.go\",\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/abi/linux/fadvise.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package linux\n+\n+const (\n+ POSIX_FADV_NORMAL = 0\n+ POSIX_FADV_RANDOM = 1\n+ POSIX_FADV_SEQUENTIAL = 2\n+ POSIX_FADV_WILLNEED = 3\n+ POSIX_FADV_DONTNEED = 4\n+ POSIX_FADV_NOREUSE = 5\n+)\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/sys_file.go", "new_path": "pkg/sentry/syscalls/linux/sys_file.go", "diff": "@@ -1111,17 +1111,6 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall\n}\n}\n-// LINT.ThenChange(vfs2/fd.go)\n-\n-const (\n- _FADV_NORMAL = 0\n- _FADV_RANDOM = 1\n- _FADV_SEQUENTIAL = 2\n- _FADV_WILLNEED = 3\n- _FADV_DONTNEED = 4\n- _FADV_NOREUSE = 5\n-)\n-\n// Fadvise64 implements linux syscall fadvise64(2).\n// This implementation currently ignores the provided advice.\nfunc Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n@@ -1146,12 +1135,12 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\n}\nswitch advice {\n- case _FADV_NORMAL:\n- case _FADV_RANDOM:\n- case _FADV_SEQUENTIAL:\n- case _FADV_WILLNEED:\n- case _FADV_DONTNEED:\n- case _FADV_NOREUSE:\n+ case linux.POSIX_FADV_NORMAL:\n+ case linux.POSIX_FADV_RANDOM:\n+ case linux.POSIX_FADV_SEQUENTIAL:\n+ case linux.POSIX_FADV_WILLNEED:\n+ case linux.POSIX_FADV_DONTNEED:\n+ case linux.POSIX_FADV_NOREUSE:\ndefault:\nreturn 0, nil, syserror.EINVAL\n}\n@@ -1160,8 +1149,6 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys\nreturn 0, nil, nil\n}\n-// LINT.IfChange\n-\nfunc mkdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error {\npath, _, err := copyInPath(t, addr, false /* allowEmpty */)\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/fd.go", "diff": "@@ -210,3 +210,41 @@ func posixLock(t *kernel.Task, args arch.SyscallArguments, file *vfs.FileDescrip\nreturn syserror.EINVAL\n}\n}\n+\n+// Fadvise64 implements fadvise64(2).\n+// This implementation currently ignores the provided advice.\n+func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n+ fd := args[0].Int()\n+ length := args[2].Int64()\n+ advice := args[3].Int()\n+\n+ // Note: offset is allowed to be negative.\n+ if length < 0 {\n+ return 0, nil, syserror.EINVAL\n+ }\n+\n+ file := t.GetFileVFS2(fd)\n+ if file == nil {\n+ return 0, nil, syserror.EBADF\n+ }\n+ defer file.DecRef()\n+\n+ // If the FD refers to a pipe or FIFO, return error.\n+ if _, isPipe := 
file.Impl().(*pipe.VFSPipeFD); isPipe {\n+ return 0, nil, syserror.ESPIPE\n+ }\n+\n+ switch advice {\n+ case linux.POSIX_FADV_NORMAL:\n+ case linux.POSIX_FADV_RANDOM:\n+ case linux.POSIX_FADV_SEQUENTIAL:\n+ case linux.POSIX_FADV_WILLNEED:\n+ case linux.POSIX_FADV_DONTNEED:\n+ case linux.POSIX_FADV_NOREUSE:\n+ default:\n+ return 0, nil, syserror.EINVAL\n+ }\n+\n+ // Sure, whatever.\n+ return 0, nil, nil\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/vfs2.go", "diff": "@@ -108,7 +108,7 @@ func Override() {\ns.Table[209] = syscalls.PartiallySupported(\"io_submit\", IoSubmit, \"Generally supported with exceptions. User ring optimizations are not implemented.\", []string{\"gvisor.dev/issue/204\"})\ns.Table[213] = syscalls.Supported(\"epoll_create\", EpollCreate)\ns.Table[217] = syscalls.Supported(\"getdents64\", Getdents64)\n- delete(s.Table, 221) // fdavise64\n+ s.Table[221] = syscalls.PartiallySupported(\"fadvise64\", Fadvise64, \"The syscall is 'supported', but ignores all provided advice.\", nil)\ns.Table[232] = syscalls.Supported(\"epoll_wait\", EpollWait)\ns.Table[233] = syscalls.Supported(\"epoll_ctl\", EpollCtl)\ns.Table[235] = syscalls.Supported(\"utimes\", Utimes)\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -191,6 +191,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:fadvise64_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" } ]
Go
Apache License 2.0
google/gvisor
Port fadvise64 to vfs2. Like vfs1, we have a trivial implementation that ignores all valid advice. Updates #2923. PiperOrigin-RevId: 317349505
259,860
19.06.2020 13:35:02
25,200
8655fb72482e179923987759f378543b2f489f08
Fix vfs2 proc/self/fd dirent iteration. Make proc/self/fd iteration work properly. Also, the comment on kernfs.Inode.IterDirents did not accurately reflect how parameters should be used/were used in kernfs.Inode impls other than fdDir. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "new_path": "pkg/sentry/fsimpl/kernfs/kernfs.go", "diff": "@@ -425,10 +425,10 @@ type inodeDynamicLookup interface {\n// IterDirents is used to iterate over dynamically created entries. It invokes\n// cb on each entry in the directory represented by the FileDescription.\n// 'offset' is the offset for the entire IterDirents call, which may include\n- // results from the caller. 'relOffset' is the offset inside the entries\n- // returned by this IterDirents invocation. In other words,\n- // 'offset+relOffset+1' is the value that should be set in vfs.Dirent.NextOff,\n- // while 'relOffset' is the place where iteration should start from.\n+ // results from the caller (e.g. \".\" and \"..\"). 'relOffset' is the offset\n+ // inside the entries returned by this IterDirents invocation. In other words,\n+ // 'offset' should be used to calculate each vfs.Dirent.NextOff as well as\n+ // the return value, while 'relOffset' is the place to start iteration.\nIterDirents(ctx context.Context, callback vfs.IterDirentsCallback, offset, relOffset int64) (newOffset int64, err error)\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/proc/task_fds.go", "new_path": "pkg/sentry/fsimpl/proc/task_fds.go", "diff": "@@ -64,7 +64,7 @@ type fdDir struct {\n}\n// IterDirents implements kernfs.inodeDynamicLookup.\n-func (i *fdDir) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback, absOffset, relOffset int64) (int64, error) {\n+func (i *fdDir) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback, offset, relOffset int64) (int64, error) {\nvar fds []int32\ni.task.WithMuLocked(func(t *kernel.Task) {\nif fdTable := t.FDTable(); fdTable != nil {\n@@ -72,7 +72,6 @@ func (i *fdDir) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback, abs\n}\n})\n- offset := absOffset + relOffset\ntyp := uint8(linux.DT_REG)\nif i.produceSymlink {\ntyp = linux.DT_LNK\n" } ]
Go
Apache License 2.0
google/gvisor
Fix vfs2 proc/self/fd dirent iteration. Make proc/self/fd iteration work properly. Also, the comment on kernfs.Inode.IterDirents did not accurately reflect how parameters should be used/were used in kernfs.Inode impls other than fdDir. Updates #2923. PiperOrigin-RevId: 317370325
259,860
19.06.2020 14:39:31
25,200
ad9f4691741cfada0ae09f73053d6195d43465ae
Fix bugs in vfs2 to make symlink tests pass. Return ENOENT if target path is empty. Make sure open(2) with O_CREAT|O_EXCL returns EEXIST when necessary. Correctly update atime in tmpfs using touchATime(). Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -79,7 +79,7 @@ afterSymlink:\n}\nif symlink, ok := child.inode.impl.(*symlink); ok && rp.ShouldFollowSymlink() {\n// Symlink traversal updates access time.\n- atomic.StoreInt64(&d.inode.atime, d.inode.fs.clock.Now().Nanoseconds())\n+ child.inode.touchAtime(rp.Mount())\nif err := rp.HandleSymlink(symlink.target); err != nil {\nreturn nil, err\n}\n@@ -372,6 +372,9 @@ afterTrailingSymlink:\nparentDir.inode.touchCMtime()\nreturn fd, nil\n}\n+ if mustCreate {\n+ return nil, syserror.EEXIST\n+ }\n// Is the file mounted over?\nif err := rp.CheckMount(&child.vfsd); err != nil {\nreturn nil, err\n@@ -379,7 +382,7 @@ afterTrailingSymlink:\n// Do we need to resolve a trailing symlink?\nif symlink, ok := child.inode.impl.(*symlink); ok && rp.ShouldFollowSymlink() {\n// Symlink traversal updates access time.\n- atomic.StoreInt64(&child.inode.atime, child.inode.fs.clock.Now().Nanoseconds())\n+ child.inode.touchAtime(rp.Mount())\nif err := rp.HandleSymlink(symlink.target); err != nil {\nreturn nil, err\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/syscalls/linux/vfs2/filesystem.go", "new_path": "pkg/sentry/syscalls/linux/vfs2/filesystem.go", "diff": "@@ -313,6 +313,9 @@ func symlinkat(t *kernel.Task, targetAddr usermem.Addr, newdirfd int32, linkpath\nif err != nil {\nreturn err\n}\n+ if len(target) == 0 {\n+ return syserror.ENOENT\n+ }\nlinkpath, err := copyInPath(t, linkpathAddr)\nif err != nil {\nreturn err\n" } ]
Go
Apache License 2.0
google/gvisor
Fix bugs in vfs2 to make symlink tests pass. - Return ENOENT if target path is empty. - Make sure open(2) with O_CREAT|O_EXCL returns EEXIST when necessary. - Correctly update atime in tmpfs using touchATime(). Updates #2923. PiperOrigin-RevId: 317382655
259,860
19.06.2020 18:26:04
25,200
f46f4a2af98a2a5cf5dd54e71a1a2dc999d4b4b1
Enable passing vfs2 tests. I forgot to update getdents earlier. Several thousand runs of the fsync and proc_net_unix tests all passed as well. Updates
[ { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -240,6 +240,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:fsync_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -262,6 +263,7 @@ syscall_test(\nsyscall_test(\nadd_overlay = True,\ntest = \"//test/syscalls/linux:getdents_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n@@ -1091,6 +1093,7 @@ syscall_test(\nsyscall_test(\ntest = \"//test/syscalls/linux:proc_net_unix_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" } ]
Go
Apache License 2.0
google/gvisor
Enable passing vfs2 tests. I forgot to update getdents earlier. Several thousand runs of the fsync and proc_net_unix tests all passed as well. Updates #2923. PiperOrigin-RevId: 317415488
259,860
21.06.2020 21:46:57
25,200
00928d142dd580c44a392e8e51246b543dc4f957
Fix vfs2 extended attributes. Correct behavior when given zero size arguments and trying to set user.* xattrs on files other than regular files or directories. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1206,7 +1206,7 @@ func (d *dentry) setDeleted() {\n// We only support xattrs prefixed with \"user.\" (see b/148380782). Currently,\n// there is no need to expose any other xattrs through a gofer.\nfunc (d *dentry) listxattr(ctx context.Context, creds *auth.Credentials, size uint64) ([]string, error) {\n- if d.file.isNil() {\n+ if d.file.isNil() || !d.userXattrSupported() {\nreturn nil, nil\n}\nxattrMap, err := d.file.listXattr(ctx, size)\n@@ -1232,6 +1232,9 @@ func (d *dentry) getxattr(ctx context.Context, creds *auth.Credentials, opts *vf\nif !strings.HasPrefix(opts.Name, linux.XATTR_USER_PREFIX) {\nreturn \"\", syserror.EOPNOTSUPP\n}\n+ if !d.userXattrSupported() {\n+ return \"\", syserror.ENODATA\n+ }\nreturn d.file.getXattr(ctx, opts.Name, opts.Size)\n}\n@@ -1245,6 +1248,9 @@ func (d *dentry) setxattr(ctx context.Context, creds *auth.Credentials, opts *vf\nif !strings.HasPrefix(opts.Name, linux.XATTR_USER_PREFIX) {\nreturn syserror.EOPNOTSUPP\n}\n+ if !d.userXattrSupported() {\n+ return syserror.EPERM\n+ }\nreturn d.file.setXattr(ctx, opts.Name, opts.Value, opts.Flags)\n}\n@@ -1258,9 +1264,19 @@ func (d *dentry) removexattr(ctx context.Context, creds *auth.Credentials, name\nif !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {\nreturn syserror.EOPNOTSUPP\n}\n+ if !d.userXattrSupported() {\n+ return syserror.EPERM\n+ }\nreturn d.file.removeXattr(ctx, name)\n}\n+// Extended attributes in the user.* namespace are only supported for regular\n+// files and directories.\n+func (d *dentry) userXattrSupported() bool {\n+ filetype := linux.S_IFMT & atomic.LoadUint32(&d.mode)\n+ return filetype == linux.S_IFREG || filetype == linux.S_IFDIR\n+}\n+\n// Preconditions: !d.isSynthetic(). d.isRegularFile() || d.isDirectory().\nfunc (d *dentry) ensureSharedHandle(ctx context.Context, read, write, trunc bool) error {\n// O_TRUNC unconditionally requires us to obtain a new handle (opened with\n" } ]
Go
Apache License 2.0
google/gvisor
Fix vfs2 extended attributes. Correct behavior when given zero size arguments and trying to set user.* xattrs on files other than regular files or directories. Updates #2923. PiperOrigin-RevId: 317590409
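The errno behavior described above can be sketched from userspace with golang.org/x/sys/unix; /tmp/fifo is a placeholder for any file that is neither a regular file nor a directory, and imports/error handling are elided.

```go
// user.* xattrs are only supported on regular files and directories.
// On other file types, set is expected to fail with EPERM and get with ENODATA.
if err := unix.Setxattr("/tmp/fifo", "user.test", []byte("value"), 0); err == unix.EPERM {
	// setting a user.* attribute on a FIFO is rejected
}
buf := make([]byte, 64)
if _, err := unix.Getxattr("/tmp/fifo", "user.test", buf); err == unix.ENODATA {
	// reading one back reports ENODATA
}
```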
259,927
22.06.2020 10:30:21
25,200
282a6aea1b375d447fdf502c6660e92eb5e19cd4
Extract common nested LinkEndpoint pattern ... and unify logic for detached nested endpoints. sniffer.go caused crashes if a packet delivery is attempted when the dispatcher is nil. Extracted the endpoint nesting logic into a common composable type so it can be used by the Fuchsia Netstack (the pattern is widespread there).
[ { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/link/nested/BUILD", "diff": "+load(\"//tools:defs.bzl\", \"go_library\", \"go_test\")\n+\n+package(licenses = [\"notice\"])\n+\n+go_library(\n+ name = \"nested\",\n+ srcs = [\n+ \"nested.go\",\n+ ],\n+ visibility = [\"//visibility:public\"],\n+ deps = [\n+ \"//pkg/sync\",\n+ \"//pkg/tcpip\",\n+ \"//pkg/tcpip/buffer\",\n+ \"//pkg/tcpip/stack\",\n+ ],\n+)\n+\n+go_test(\n+ name = \"nested_test\",\n+ size = \"small\",\n+ srcs = [\n+ \"nested_test.go\",\n+ ],\n+ deps = [\n+ \"//pkg/tcpip\",\n+ \"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/link/nested\",\n+ \"//pkg/tcpip/stack\",\n+ ],\n+)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/link/nested/nested.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+// Package nested provides helpers to implement the pattern of nested\n+// stack.LinkEndpoints.\n+package nested\n+\n+import (\n+ \"gvisor.dev/gvisor/pkg/sync\"\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+)\n+\n+// Endpoint is a wrapper around stack.LinkEndpoint and stack.NetworkDispatcher\n+// that can be used to implement nesting safely by providing lifecycle\n+// concurrency guards.\n+//\n+// See the tests in this package for example usage.\n+type Endpoint struct {\n+ child stack.LinkEndpoint\n+ embedder stack.NetworkDispatcher\n+\n+ // mu protects dispatcher.\n+ mu sync.RWMutex\n+ dispatcher stack.NetworkDispatcher\n+}\n+\n+var _ stack.GSOEndpoint = (*Endpoint)(nil)\n+var _ stack.LinkEndpoint = (*Endpoint)(nil)\n+var _ stack.NetworkDispatcher = (*Endpoint)(nil)\n+\n+// Init initializes a nested.Endpoint that uses embedder as the dispatcher for\n+// child on Attach.\n+//\n+// See the tests in this package for example usage.\n+func (e *Endpoint) Init(child stack.LinkEndpoint, embedder stack.NetworkDispatcher) {\n+ e.child = child\n+ e.embedder = embedder\n+}\n+\n+// DeliverNetworkPacket implements stack.NetworkDispatcher.\n+func (e *Endpoint) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\n+ e.mu.RLock()\n+ d := e.dispatcher\n+ e.mu.RUnlock()\n+ if d != nil {\n+ d.DeliverNetworkPacket(remote, local, protocol, pkt)\n+ }\n+}\n+\n+// Attach implements stack.LinkEndpoint.\n+func (e *Endpoint) Attach(dispatcher stack.NetworkDispatcher) {\n+ e.mu.Lock()\n+ e.dispatcher = dispatcher\n+ e.mu.Unlock()\n+ // If we're attaching to a valid dispatcher, pass embedder as the dispatcher\n+ // to our child, otherwise detach the child by giving it a nil dispatcher.\n+ var pass stack.NetworkDispatcher\n+ if dispatcher != nil {\n+ pass = e.embedder\n+ }\n+ e.child.Attach(pass)\n+}\n+\n+// IsAttached implements stack.LinkEndpoint.\n+func (e *Endpoint) IsAttached() bool {\n+ e.mu.RLock()\n+ isAttached := e.dispatcher != nil\n+ e.mu.RUnlock()\n+ return isAttached\n+}\n+\n+// MTU implements 
stack.LinkEndpoint.\n+func (e *Endpoint) MTU() uint32 {\n+ return e.child.MTU()\n+}\n+\n+// Capabilities implements stack.LinkEndpoint.\n+func (e *Endpoint) Capabilities() stack.LinkEndpointCapabilities {\n+ return e.child.Capabilities()\n+}\n+\n+// MaxHeaderLength implements stack.LinkEndpoint.\n+func (e *Endpoint) MaxHeaderLength() uint16 {\n+ return e.child.MaxHeaderLength()\n+}\n+\n+// LinkAddress implements stack.LinkEndpoint.\n+func (e *Endpoint) LinkAddress() tcpip.LinkAddress {\n+ return e.child.LinkAddress()\n+}\n+\n+// WritePacket implements stack.LinkEndpoint.\n+func (e *Endpoint) WritePacket(r *stack.Route, gso *stack.GSO, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) *tcpip.Error {\n+ return e.child.WritePacket(r, gso, protocol, pkt)\n+}\n+\n+// WritePackets implements stack.LinkEndpoint.\n+func (e *Endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.PacketBufferList, protocol tcpip.NetworkProtocolNumber) (int, *tcpip.Error) {\n+ return e.child.WritePackets(r, gso, pkts, protocol)\n+}\n+\n+// WriteRawPacket implements stack.LinkEndpoint.\n+func (e *Endpoint) WriteRawPacket(vv buffer.VectorisedView) *tcpip.Error {\n+ return e.child.WriteRawPacket(vv)\n+}\n+\n+// Wait implements stack.LinkEndpoint.\n+func (e *Endpoint) Wait() {\n+ e.child.Wait()\n+}\n+\n+// GSOMaxSize implements stack.GSOEndpoint.\n+func (e *Endpoint) GSOMaxSize() uint32 {\n+ if e, ok := e.child.(stack.GSOEndpoint); ok {\n+ return e.GSOMaxSize()\n+ }\n+ return 0\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pkg/tcpip/link/nested/nested_test.go", "diff": "+// Copyright 2020 The gVisor Authors.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+package nested_test\n+\n+import (\n+ \"testing\"\n+\n+ \"gvisor.dev/gvisor/pkg/tcpip\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/nested\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/stack\"\n+)\n+\n+type parentEndpoint struct {\n+ nested.Endpoint\n+}\n+\n+var _ stack.LinkEndpoint = (*parentEndpoint)(nil)\n+var _ stack.NetworkDispatcher = (*parentEndpoint)(nil)\n+\n+type childEndpoint struct {\n+ stack.LinkEndpoint\n+ dispatcher stack.NetworkDispatcher\n+}\n+\n+var _ stack.LinkEndpoint = (*childEndpoint)(nil)\n+\n+func (c *childEndpoint) Attach(dispatcher stack.NetworkDispatcher) {\n+ c.dispatcher = dispatcher\n+}\n+\n+func (c *childEndpoint) IsAttached() bool {\n+ return c.dispatcher != nil\n+}\n+\n+type counterDispatcher struct {\n+ count int\n+}\n+\n+var _ stack.NetworkDispatcher = (*counterDispatcher)(nil)\n+\n+func (d *counterDispatcher) DeliverNetworkPacket(tcpip.LinkAddress, tcpip.LinkAddress, tcpip.NetworkProtocolNumber, *stack.PacketBuffer) {\n+ d.count++\n+}\n+\n+func TestNestedLinkEndpoint(t *testing.T) {\n+ const emptyAddress = tcpip.LinkAddress(\"\")\n+\n+ var (\n+ childEP childEndpoint\n+ nestedEP parentEndpoint\n+ disp counterDispatcher\n+ )\n+ nestedEP.Endpoint.Init(&childEP, &nestedEP)\n+\n+ if childEP.IsAttached() {\n+ t.Error(\"On init, 
childEP.IsAttached() = true, want = false\")\n+ }\n+ if nestedEP.IsAttached() {\n+ t.Error(\"On init, nestedEP.IsAttached() = true, want = false\")\n+ }\n+\n+ nestedEP.Attach(&disp)\n+ if disp.count != 0 {\n+ t.Fatalf(\"After attach, got disp.count = %d, want = 0\", disp.count)\n+ }\n+ if !childEP.IsAttached() {\n+ t.Error(\"After attach, childEP.IsAttached() = false, want = true\")\n+ }\n+ if !nestedEP.IsAttached() {\n+ t.Error(\"After attach, nestedEP.IsAttached() = false, want = true\")\n+ }\n+\n+ nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, &stack.PacketBuffer{})\n+ if disp.count != 1 {\n+ t.Errorf(\"After first packet with dispatcher attached, got disp.count = %d, want = 1\", disp.count)\n+ }\n+\n+ nestedEP.Attach(nil)\n+ if childEP.IsAttached() {\n+ t.Error(\"After detach, childEP.IsAttached() = true, want = false\")\n+ }\n+ if nestedEP.IsAttached() {\n+ t.Error(\"After detach, nestedEP.IsAttached() = true, want = false\")\n+ }\n+\n+ disp.count = 0\n+ nestedEP.DeliverNetworkPacket(emptyAddress, emptyAddress, header.IPv4ProtocolNumber, &stack.PacketBuffer{})\n+ if disp.count != 0 {\n+ t.Errorf(\"After second packet with dispatcher detached, got disp.count = %d, want = 0\", disp.count)\n+ }\n+\n+}\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/sniffer/BUILD", "new_path": "pkg/tcpip/link/sniffer/BUILD", "diff": "@@ -14,6 +14,7 @@ go_library(\n\"//pkg/tcpip\",\n\"//pkg/tcpip/buffer\",\n\"//pkg/tcpip/header\",\n+ \"//pkg/tcpip/link/nested\",\n\"//pkg/tcpip/stack\",\n],\n)\n" }, { "change_type": "MODIFY", "old_path": "pkg/tcpip/link/sniffer/sniffer.go", "new_path": "pkg/tcpip/link/sniffer/sniffer.go", "diff": "@@ -31,6 +31,7 @@ import (\n\"gvisor.dev/gvisor/pkg/tcpip\"\n\"gvisor.dev/gvisor/pkg/tcpip/buffer\"\n\"gvisor.dev/gvisor/pkg/tcpip/header\"\n+ \"gvisor.dev/gvisor/pkg/tcpip/link/nested\"\n\"gvisor.dev/gvisor/pkg/tcpip/stack\"\n)\n@@ -48,18 +49,21 @@ var LogPackets uint32 = 1\nvar LogPacketsToPCAP uint32 = 1\ntype endpoint struct {\n- dispatcher stack.NetworkDispatcher\n- lower stack.LinkEndpoint\n+ nested.Endpoint\nwriter io.Writer\nmaxPCAPLen uint32\n}\n+var _ stack.GSOEndpoint = (*endpoint)(nil)\n+var _ stack.LinkEndpoint = (*endpoint)(nil)\n+var _ stack.NetworkDispatcher = (*endpoint)(nil)\n+\n// New creates a new sniffer link-layer endpoint. It wraps around another\n// endpoint and logs packets and they traverse the endpoint.\nfunc New(lower stack.LinkEndpoint) stack.LinkEndpoint {\n- return &endpoint{\n- lower: lower,\n- }\n+ sniffer := &endpoint{}\n+ sniffer.Endpoint.Init(lower, sniffer)\n+ return sniffer\n}\nfunc zoneOffset() (int32, error) {\n@@ -103,11 +107,12 @@ func NewWithWriter(lower stack.LinkEndpoint, writer io.Writer, snapLen uint32) (\nif err := writePCAPHeader(writer, snapLen); err != nil {\nreturn nil, err\n}\n- return &endpoint{\n- lower: lower,\n+ sniffer := &endpoint{\nwriter: writer,\nmaxPCAPLen: snapLen,\n- }, nil\n+ }\n+ sniffer.Endpoint.Init(lower, sniffer)\n+ return sniffer, nil\n}\n// DeliverNetworkPacket implements the stack.NetworkDispatcher interface. 
It is\n@@ -115,50 +120,7 @@ func NewWithWriter(lower stack.LinkEndpoint, writer io.Writer, snapLen uint32) (\n// logs the packet before forwarding to the actual dispatcher.\nfunc (e *endpoint) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\ne.dumpPacket(\"recv\", nil, protocol, pkt)\n- e.dispatcher.DeliverNetworkPacket(remote, local, protocol, pkt)\n-}\n-\n-// Attach implements the stack.LinkEndpoint interface. It saves the dispatcher\n-// and registers with the lower endpoint as its dispatcher so that \"e\" is called\n-// for inbound packets.\n-func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {\n- e.dispatcher = dispatcher\n- e.lower.Attach(e)\n-}\n-\n-// IsAttached implements stack.LinkEndpoint.IsAttached.\n-func (e *endpoint) IsAttached() bool {\n- return e.dispatcher != nil\n-}\n-\n-// MTU implements stack.LinkEndpoint.MTU. It just forwards the request to the\n-// lower endpoint.\n-func (e *endpoint) MTU() uint32 {\n- return e.lower.MTU()\n-}\n-\n-// Capabilities implements stack.LinkEndpoint.Capabilities. It just forwards the\n-// request to the lower endpoint.\n-func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities {\n- return e.lower.Capabilities()\n-}\n-\n-// MaxHeaderLength implements the stack.LinkEndpoint interface. It just forwards\n-// the request to the lower endpoint.\n-func (e *endpoint) MaxHeaderLength() uint16 {\n- return e.lower.MaxHeaderLength()\n-}\n-\n-func (e *endpoint) LinkAddress() tcpip.LinkAddress {\n- return e.lower.LinkAddress()\n-}\n-\n-// GSOMaxSize returns the maximum GSO packet size.\n-func (e *endpoint) GSOMaxSize() uint32 {\n- if gso, ok := e.lower.(stack.GSOEndpoint); ok {\n- return gso.GSOMaxSize()\n- }\n- return 0\n+ e.Endpoint.DeliverNetworkPacket(remote, local, protocol, pkt)\n}\nfunc (e *endpoint) dumpPacket(prefix string, gso *stack.GSO, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {\n@@ -203,7 +165,7 @@ func (e *endpoint) dumpPacket(prefix string, gso *stack.GSO, protocol tcpip.Netw\n// forwards the request to the lower endpoint.\nfunc (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) *tcpip.Error {\ne.dumpPacket(\"send\", gso, protocol, pkt)\n- return e.lower.WritePacket(r, gso, protocol, pkt)\n+ return e.Endpoint.WritePacket(r, gso, protocol, pkt)\n}\n// WritePackets implements the stack.LinkEndpoint interface. It is called by\n@@ -213,7 +175,7 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe\nfor pkt := pkts.Front(); pkt != nil; pkt = pkt.Next() {\ne.dumpPacket(\"send\", gso, protocol, pkt)\n}\n- return e.lower.WritePackets(r, gso, pkts, protocol)\n+ return e.Endpoint.WritePackets(r, gso, pkts, protocol)\n}\n// WriteRawPacket implements stack.LinkEndpoint.WriteRawPacket.\n@@ -221,12 +183,9 @@ func (e *endpoint) WriteRawPacket(vv buffer.VectorisedView) *tcpip.Error {\ne.dumpPacket(\"send\", nil, 0, &stack.PacketBuffer{\nData: vv,\n})\n- return e.lower.WriteRawPacket(vv)\n+ return e.Endpoint.WriteRawPacket(vv)\n}\n-// Wait implements stack.LinkEndpoint.Wait.\n-func (e *endpoint) Wait() { e.lower.Wait() }\n-\nfunc logPacket(prefix string, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer, gso *stack.GSO) {\n// Figure out the network layer info.\nvar transProto uint8\n" } ]
Go
Apache License 2.0
google/gvisor
Extract common nested LinkEndpoint pattern ... and unify logic for detached nested endpoints. sniffer.go caused crashes if a packet delivery is attempted when the dispatcher is nil. Extracted the endpoint nesting logic into a common composable type so it can be used by the Fuchsia Netstack (the pattern is widespread there). PiperOrigin-RevId: 317682842
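A wrapper built on the new nested.Endpoint embeds it and passes itself as the embedder, mirroring the sniffer rewrite above. This is a sketch: loggingEndpoint is a hypothetical wrapper, and the gvisor.dev/gvisor imports (log, tcpip, link/nested, stack) are assumed.

```go
// loggingEndpoint wraps a lower LinkEndpoint and logs inbound packets.
type loggingEndpoint struct {
	nested.Endpoint
}

var _ stack.LinkEndpoint = (*loggingEndpoint)(nil)
var _ stack.NetworkDispatcher = (*loggingEndpoint)(nil)

func newLoggingEndpoint(lower stack.LinkEndpoint) stack.LinkEndpoint {
	e := &loggingEndpoint{}
	// Register e as the dispatcher the child delivers to once attached.
	e.Endpoint.Init(lower, e)
	return e
}

// DeliverNetworkPacket intercepts inbound packets, then hands them to the
// embedded Endpoint, which forwards them only while a dispatcher is attached.
func (e *loggingEndpoint) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
	log.Infof("recv proto=%d", protocol)
	e.Endpoint.DeliverNetworkPacket(remote, local, protocol, pkt)
}
```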
259,860
22.06.2020 11:38:25
25,200
4573e7d863d59d59c6a4f72f396f72b0f6458cb2
Check for invalid trailing / when traversing path in gofer OpenAt. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -767,15 +767,17 @@ afterTrailingSymlink:\nparent.dirMu.Unlock()\nreturn fd, err\n}\n- if err != nil {\nparent.dirMu.Unlock()\n+ if err != nil {\nreturn nil, err\n}\n- // Open existing child or follow symlink.\n- parent.dirMu.Unlock()\nif mustCreate {\nreturn nil, syserror.EEXIST\n}\n+ if !child.isDir() && rp.MustBeDir() {\n+ return nil, syserror.ENOTDIR\n+ }\n+ // Open existing child or follow symlink.\nif child.isSymlink() && rp.ShouldFollowSymlink() {\ntarget, err := child.readlink(ctx, rp.Mount())\nif err != nil {\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/BUILD", "new_path": "test/syscalls/BUILD", "diff": "@@ -181,6 +181,7 @@ syscall_test(\nsize = \"medium\",\nadd_overlay = True,\ntest = \"//test/syscalls/linux:exec_binary_test\",\n+ vfs2 = \"True\",\n)\nsyscall_test(\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/open.cc", "new_path": "test/syscalls/linux/open.cc", "diff": "@@ -439,6 +439,12 @@ TEST_F(OpenTest, CanTruncateWithStrangePermissions) {\nEXPECT_THAT(close(fd), SyscallSucceeds());\n}\n+TEST_F(OpenTest, OpenNonDirectoryWithTrailingSlash) {\n+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());\n+ const std::string bad_path = file.path() + \"/\";\n+ EXPECT_THAT(open(bad_path.c_str(), O_RDONLY), SyscallFailsWithErrno(ENOTDIR));\n+}\n+\n} // namespace\n} // namespace testing\n" } ]
Go
Apache License 2.0
google/gvisor
Check for invalid trailing / when traversing path in gofer OpenAt. Updates #2923. PiperOrigin-RevId: 317700049
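The check added above is visible from userspace. A minimal sketch, where "file" stands in for an existing regular file and the errors/fmt/os/syscall imports are assumed.

```go
// Opening a non-directory with a trailing slash must fail with ENOTDIR.
if _, err := os.OpenFile("file/", os.O_RDONLY, 0); errors.Is(err, syscall.ENOTDIR) {
	fmt.Println("trailing slash on a regular file rejected with ENOTDIR")
}
```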
259,860
22.06.2020 21:29:31
25,200
38d7b2fe5630a8f3169cfef7703921c4bc4056c2
Only allow regular files, sockets, pipes, and char devices to be imported.
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/host/host.go", "new_path": "pkg/sentry/fsimpl/host/host.go", "diff": "@@ -462,7 +462,8 @@ func (i *inode) open(ctx context.Context, d *vfs.Dentry, mnt *vfs.Mount, flags u\n// TODO(gvisor.dev/issue/1672): implement behavior corresponding to these allowed flags.\nflags &= syscall.O_ACCMODE | syscall.O_DIRECT | syscall.O_NONBLOCK | syscall.O_DSYNC | syscall.O_SYNC | syscall.O_APPEND\n- if fileType == syscall.S_IFSOCK {\n+ switch fileType {\n+ case syscall.S_IFSOCK:\nif i.isTTY {\nlog.Warningf(\"cannot use host socket fd %d as TTY\", i.hostFD)\nreturn nil, syserror.ENOTTY\n@@ -474,11 +475,8 @@ func (i *inode) open(ctx context.Context, d *vfs.Dentry, mnt *vfs.Mount, flags u\n}\n// Currently, we only allow Unix sockets to be imported.\nreturn unixsocket.NewFileDescription(ep, ep.Type(), flags, mnt, d, &i.locks)\n- }\n- // TODO(gvisor.dev/issue/1672): Allow only specific file types here, so\n- // that we don't allow importing arbitrary file types without proper\n- // support.\n+ case syscall.S_IFREG, syscall.S_IFIFO, syscall.S_IFCHR:\nif i.isTTY {\nfd := &TTYFileDescription{\nfileDescription: fileDescription{inode: i},\n@@ -499,6 +497,11 @@ func (i *inode) open(ctx context.Context, d *vfs.Dentry, mnt *vfs.Mount, flags u\nreturn nil, err\n}\nreturn vfsfd, nil\n+\n+ default:\n+ log.Warningf(\"cannot import host fd %d with file type %o\", i.hostFD, fileType)\n+ return nil, syserror.EPERM\n+ }\n}\n// fileDescription is embedded by host fd implementations of FileDescriptionImpl.\n" } ]
Go
Apache License 2.0
google/gvisor
Only allow regular files, sockets, pipes, and char devices to be imported. PiperOrigin-RevId: 317796028
260,003
23.06.2020 16:05:39
25,200
793edf4cb4597751b7f2b7b913a5ab7fa3d50373
Deflake proc test: Don't fail on DT_UNKNOWN. Per manual page: "All applications must properly handle a return of DT_UNKNOWN."
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "@@ -1998,8 +1998,6 @@ void CheckDuplicatesRecursively(std::string path) {\n}\nchildren.insert(std::string(dp->d_name));\n- ASSERT_NE(dp->d_type, DT_UNKNOWN);\n-\nif (dp->d_type == DT_DIR) {\nchild_dirs.push_back(std::string(dp->d_name));\n}\n" } ]
Go
Apache License 2.0
google/gvisor
Deflake proc test: Don't fail on DT_UNKNOWN. Per manual page: "All applications must properly handle a return of DT_UNKNOWN." PiperOrigin-RevId: 317957013
260,003
23.06.2020 17:43:02
25,200
acf519a77b480e8d974186568bd66eaa89bac024
Nit fix: Create and use a std::string object for `const char*`.
[ { "change_type": "MODIFY", "old_path": "test/syscalls/linux/proc.cc", "new_path": "test/syscalls/linux/proc.cc", "diff": "@@ -1982,24 +1982,26 @@ void CheckDuplicatesRecursively(std::string path) {\nbreak; // We're done.\n}\n- if (strcmp(dp->d_name, \".\") == 0 || strcmp(dp->d_name, \"..\") == 0) {\n+ const std::string name = dp->d_name;\n+\n+ if (name == \".\" || name == \"..\") {\ncontinue;\n}\n// Ignore a duplicate entry if it isn't the last attempt.\nif (i == max_attempts - 1) {\n- ASSERT_EQ(children.find(std::string(dp->d_name)), children.end())\n- << absl::StrCat(path, \"/\", dp->d_name);\n- } else if (children.find(std::string(dp->d_name)) != children.end()) {\n+ ASSERT_EQ(children.find(name), children.end())\n+ << absl::StrCat(path, \"/\", name);\n+ } else if (children.find(name) != children.end()) {\nstd::cerr << \"Duplicate entry: \" << i << \":\"\n- << absl::StrCat(path, \"/\", dp->d_name) << std::endl;\n+ << absl::StrCat(path, \"/\", name) << std::endl;\nsuccess = false;\nbreak;\n}\n- children.insert(std::string(dp->d_name));\n+ children.insert(name);\nif (dp->d_type == DT_DIR) {\n- child_dirs.push_back(std::string(dp->d_name));\n+ child_dirs.push_back(name);\n}\n}\nif (success) {\n" } ]
Go
Apache License 2.0
google/gvisor
Nit fix: Create and use a std::string object for `const char*`. PiperOrigin-RevId: 317973144
259,860
23.06.2020 18:31:53
25,200
65a587dedf1a30b3614a66532d2b448026b9c540
Complete inotify IN_EXCL_UNLINK implementation in VFS2. Events were only skipped on parent directories after their children were unlinked; events on the unlinked file itself need to be skipped as well. As a result, all Watches.Notify() calls need to know whether the dentry where the call came from was unlinked. Updates
[ { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "new_path": "pkg/sentry/fsimpl/gofer/filesystem.go", "diff": "@@ -375,7 +375,7 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif dir {\nev |= linux.IN_ISDIR\n}\n- parent.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent)\n+ parent.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */)\nreturn nil\n}\nif fs.opts.interop == InteropModeShared {\n@@ -409,7 +409,7 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir\nif dir {\nev |= linux.IN_ISDIR\n}\n- parent.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent)\n+ parent.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */)\nreturn nil\n}\n@@ -543,7 +543,7 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b\n// Generate inotify events for rmdir or unlink.\nif dir {\n- parent.watches.Notify(name, linux.IN_DELETE|linux.IN_ISDIR, 0, vfs.InodeEvent)\n+ parent.watches.Notify(name, linux.IN_DELETE|linux.IN_ISDIR, 0, vfs.InodeEvent, true /* unlinked */)\n} else {\nvar cw *vfs.Watches\nif child != nil {\n@@ -1040,7 +1040,7 @@ func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.Resolving\n}\nchildVFSFD = &fd.vfsfd\n}\n- d.watches.Notify(name, linux.IN_CREATE, 0, vfs.PathEvent)\n+ d.watches.Notify(name, linux.IN_CREATE, 0, vfs.PathEvent, false /* unlinked */)\nreturn childVFSFD, nil\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/gofer/gofer.go", "new_path": "pkg/sentry/fsimpl/gofer/gofer.go", "diff": "@@ -1066,9 +1066,9 @@ func (d *dentry) InotifyWithParent(events, cookie uint32, et vfs.EventType) {\nd.fs.renameMu.RLock()\n// The ordering below is important, Linux always notifies the parent first.\nif d.parent != nil {\n- d.parent.watches.NotifyWithExclusions(d.name, events, cookie, et, d.isDeleted())\n+ d.parent.watches.Notify(d.name, events, cookie, et, d.isDeleted())\n}\n- d.watches.Notify(\"\", events, cookie, et)\n+ d.watches.Notify(\"\", events, cookie, et, d.isDeleted())\nd.fs.renameMu.RUnlock()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "new_path": "pkg/sentry/fsimpl/tmpfs/filesystem.go", "diff": "@@ -182,7 +182,7 @@ func (fs *filesystem) doCreateAt(rp *vfs.ResolvingPath, dir bool, create func(pa\nif dir {\nev |= linux.IN_ISDIR\n}\n- parentDir.inode.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent)\n+ parentDir.inode.watches.Notify(name, uint32(ev), 0, vfs.InodeEvent, false /* unlinked */)\nparentDir.inode.touchCMtime()\nreturn nil\n}\n@@ -251,7 +251,7 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.\nreturn syserror.EMLINK\n}\ni.incLinksLocked()\n- i.watches.Notify(\"\", linux.IN_ATTRIB, 0, vfs.InodeEvent)\n+ i.watches.Notify(\"\", linux.IN_ATTRIB, 0, vfs.InodeEvent, false /* unlinked */)\nparentDir.insertChildLocked(fs.newDentry(i), name)\nreturn nil\n})\n@@ -368,7 +368,7 @@ afterTrailingSymlink:\nif err != nil {\nreturn nil, err\n}\n- parentDir.inode.watches.Notify(name, linux.IN_CREATE, 0, vfs.PathEvent)\n+ parentDir.inode.watches.Notify(name, linux.IN_CREATE, 0, vfs.PathEvent, false /* unlinked */)\nparentDir.inode.touchCMtime()\nreturn fd, nil\n}\n@@ -625,7 +625,7 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error\nreturn err\n}\nparentDir.removeChildLocked(child)\n- parentDir.inode.watches.Notify(name, linux.IN_DELETE|linux.IN_ISDIR, 0, vfs.InodeEvent)\n+ 
parentDir.inode.watches.Notify(name, linux.IN_DELETE|linux.IN_ISDIR, 0, vfs.InodeEvent, true /* unlinked */)\n// Remove links for child, child/., and child/..\nchild.inode.decLinksLocked()\nchild.inode.decLinksLocked()\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "new_path": "pkg/sentry/fsimpl/tmpfs/tmpfs.go", "diff": "@@ -259,14 +259,16 @@ func (d *dentry) InotifyWithParent(events, cookie uint32, et vfs.EventType) {\nevents |= linux.IN_ISDIR\n}\n+ // tmpfs never calls VFS.InvalidateDentry(), so d.vfsd.IsDead() indicates\n+ // that d was deleted.\n+ deleted := d.vfsd.IsDead()\n+\nd.inode.fs.mu.RLock()\n// The ordering below is important, Linux always notifies the parent first.\nif d.parent != nil {\n- // tmpfs never calls VFS.InvalidateDentry(), so d.vfsd.IsDead() indicates\n- // that d was deleted.\n- d.parent.inode.watches.NotifyWithExclusions(d.name, events, cookie, et, d.vfsd.IsDead())\n+ d.parent.inode.watches.Notify(d.name, events, cookie, et, deleted)\n}\n- d.inode.watches.Notify(\"\", events, cookie, et)\n+ d.inode.watches.Notify(\"\", events, cookie, et, deleted)\nd.inode.fs.mu.RUnlock()\n}\n" }, { "change_type": "MODIFY", "old_path": "pkg/sentry/vfs/inotify.go", "new_path": "pkg/sentry/vfs/inotify.go", "diff": "@@ -467,21 +467,16 @@ func (w *Watches) Remove(id uint64) {\ndelete(w.ws, id)\n}\n-// Notify queues a new event with all watches in this set.\n-func (w *Watches) Notify(name string, events, cookie uint32, et EventType) {\n- w.NotifyWithExclusions(name, events, cookie, et, false)\n-}\n-\n-// NotifyWithExclusions queues a new event with watches in this set. Watches\n-// with IN_EXCL_UNLINK are skipped if the event is coming from a child that\n-// has been unlinked.\n-func (w *Watches) NotifyWithExclusions(name string, events, cookie uint32, et EventType, unlinked bool) {\n+// Notify queues a new event with watches in this set. Watches with\n+// IN_EXCL_UNLINK are skipped if the event is coming from a child that has been\n+// unlinked.\n+func (w *Watches) Notify(name string, events, cookie uint32, et EventType, unlinked bool) {\n// N.B. We don't defer the unlocks because Notify is in the hot path of\n// all IO operations, and the defer costs too much for small IO\n// operations.\nw.mu.RLock()\nfor _, watch := range w.ws {\n- if unlinked && watch.ExcludeUnlinkedChildren() && et == PathEvent {\n+ if unlinked && watch.ExcludeUnlinked() && et == PathEvent {\ncontinue\n}\nwatch.Notify(name, events, cookie)\n@@ -492,7 +487,7 @@ func (w *Watches) NotifyWithExclusions(name string, events, cookie uint32, et Ev\n// HandleDeletion is called when the watch target is destroyed to emit\n// the appropriate events.\nfunc (w *Watches) HandleDeletion() {\n- w.Notify(\"\", linux.IN_DELETE_SELF, 0, InodeEvent)\n+ w.Notify(\"\", linux.IN_DELETE_SELF, 0, InodeEvent, true /* unlinked */)\n// We can't hold w.mu while calling watch.handleDeletion to preserve lock\n// ordering w.r.t to the owner inotify instances. 
Instead, atomically move\n@@ -539,14 +534,12 @@ func (w *Watch) OwnerID() uint64 {\nreturn w.owner.id\n}\n-// ExcludeUnlinkedChildren indicates whether the watched object should continue\n-// to be notified of events of its children after they have been unlinked, e.g.\n-// for an open file descriptor.\n+// ExcludeUnlinked indicates whether the watched object should continue to be\n+// notified of events originating from a path that has been unlinked.\n//\n-// TODO(gvisor.dev/issue/1479): Implement IN_EXCL_UNLINK.\n-// We can do this by keeping track of the set of unlinked children in Watches\n-// to skip notification.\n-func (w *Watch) ExcludeUnlinkedChildren() bool {\n+// For example, if \"foo/bar\" is opened and then unlinked, operations on the\n+// open fd may be ignored by watches on \"foo\" and \"foo/bar\" with IN_EXCL_UNLINK.\n+func (w *Watch) ExcludeUnlinked() bool {\nreturn atomic.LoadUint32(&w.mask)&linux.IN_EXCL_UNLINK != 0\n}\n@@ -710,10 +703,10 @@ func InotifyEventFromStatMask(mask uint32) uint32 {\n// parent/child notifications, the child is notified first in this case.\nfunc InotifyRemoveChild(self, parent *Watches, name string) {\nif self != nil {\n- self.Notify(\"\", linux.IN_ATTRIB, 0, InodeEvent)\n+ self.Notify(\"\", linux.IN_ATTRIB, 0, InodeEvent, true /* unlinked */)\n}\nif parent != nil {\n- parent.Notify(name, linux.IN_DELETE, 0, InodeEvent)\n+ parent.Notify(name, linux.IN_DELETE, 0, InodeEvent, true /* unlinked */)\n}\n}\n@@ -726,13 +719,13 @@ func InotifyRename(ctx context.Context, renamed, oldParent, newParent *Watches,\n}\ncookie := uniqueid.InotifyCookie(ctx)\nif oldParent != nil {\n- oldParent.Notify(oldName, dirEv|linux.IN_MOVED_FROM, cookie, InodeEvent)\n+ oldParent.Notify(oldName, dirEv|linux.IN_MOVED_FROM, cookie, InodeEvent, false /* unlinked */)\n}\nif newParent != nil {\n- newParent.Notify(newName, dirEv|linux.IN_MOVED_TO, cookie, InodeEvent)\n+ newParent.Notify(newName, dirEv|linux.IN_MOVED_TO, cookie, InodeEvent, false /* unlinked */)\n}\n// Somewhat surprisingly, self move events do not have a cookie.\nif renamed != nil {\n- renamed.Notify(\"\", linux.IN_MOVE_SELF, 0, InodeEvent)\n+ renamed.Notify(\"\", linux.IN_MOVE_SELF, 0, InodeEvent, false /* unlinked */)\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "test/syscalls/linux/inotify.cc", "new_path": "test/syscalls/linux/inotify.cc", "diff": "@@ -1840,9 +1840,7 @@ TEST(Inotify, IncludeUnlinkedFile_NoRandomSave) {\nconst TempPath dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\nconst TempPath file = ASSERT_NO_ERRNO_AND_VALUE(\nTempPath::CreateFileWith(dir.path(), \"123\", TempPath::kDefaultFileMode));\n-\n- const FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\nconst FileDescriptor inotify_fd =\nASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n@@ -1855,7 +1853,7 @@ TEST(Inotify, IncludeUnlinkedFile_NoRandomSave) {\nint val = 0;\nASSERT_THAT(read(fd.get(), &val, sizeof(val)), SyscallSucceeds());\nASSERT_THAT(write(fd.get(), &val, sizeof(val)), SyscallSucceeds());\n- const std::vector<Event> events =\n+ std::vector<Event> events =\nASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\nEXPECT_THAT(events, Are({\nEvent(IN_ATTRIB, file_wd),\n@@ -1865,6 +1863,15 @@ TEST(Inotify, IncludeUnlinkedFile_NoRandomSave) {\nEvent(IN_MODIFY, dir_wd, Basename(file.path())),\nEvent(IN_MODIFY, file_wd),\n}));\n+\n+ fd.reset();\n+ events = 
ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ EXPECT_THAT(events, Are({\n+ Event(IN_CLOSE_WRITE, dir_wd, Basename(file.path())),\n+ Event(IN_CLOSE_WRITE, file_wd),\n+ Event(IN_DELETE_SELF, file_wd),\n+ Event(IN_IGNORED, file_wd),\n+ }));\n}\n// Watches created with IN_EXCL_UNLINK will stop emitting events on fds for\n@@ -1881,13 +1888,14 @@ TEST(Inotify, ExcludeUnlink_NoRandomSave) {\nconst TempPath file =\nASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileIn(dir.path()));\n- const FileDescriptor fd =\n- ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\n+ FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));\nconst FileDescriptor inotify_fd =\nASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n- const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ const int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\ninotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+ const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), file.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n// Unlink the child, which should cause further operations on the open file\n// descriptor to be ignored.\n@@ -1895,14 +1903,28 @@ TEST(Inotify, ExcludeUnlink_NoRandomSave) {\nint val = 0;\nASSERT_THAT(write(fd.get(), &val, sizeof(val)), SyscallSucceeds());\nASSERT_THAT(read(fd.get(), &val, sizeof(val)), SyscallSucceeds());\n- const std::vector<Event> events =\n+ std::vector<Event> events =\nASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n- EXPECT_THAT(events, Are({Event(IN_DELETE, wd, Basename(file.path()))}));\n+ EXPECT_THAT(events, Are({\n+ Event(IN_ATTRIB, file_wd),\n+ Event(IN_DELETE, dir_wd, Basename(file.path())),\n+ }));\n+\n+ fd.reset();\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ ASSERT_THAT(events, Are({\n+ Event(IN_DELETE_SELF, file_wd),\n+ Event(IN_IGNORED, file_wd),\n+ }));\n}\n// We need to disable S/R because there are filesystems where we cannot re-open\n// fds to an unlinked file across S/R, e.g. gofer-backed filesytems.\nTEST(Inotify, ExcludeUnlinkDirectory_NoRandomSave) {\n+ // TODO(gvisor.dev/issue/1624): This test fails on VFS1. 
Remove once VFS1 is\n+ // deleted.\n+ SKIP_IF(IsRunningWithVFS1());\n+\nconst DisableSave ds;\nconst TempPath parent = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());\n@@ -1912,20 +1934,29 @@ TEST(Inotify, ExcludeUnlinkDirectory_NoRandomSave) {\nconst FileDescriptor inotify_fd =\nASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n- const FileDescriptor fd =\n+ FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(dirPath.c_str(), O_RDONLY | O_DIRECTORY));\n- const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ const int parent_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\ninotify_fd.get(), parent.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+ const int self_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n// Unlink the dir, and then close the open fd.\nASSERT_THAT(rmdir(dirPath.c_str()), SyscallSucceeds());\ndir.reset();\n- const std::vector<Event> events =\n+ std::vector<Event> events =\nASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n// No close event should appear.\nASSERT_THAT(events,\n- Are({Event(IN_DELETE | IN_ISDIR, wd, Basename(dirPath))}));\n+ Are({Event(IN_DELETE | IN_ISDIR, parent_wd, Basename(dirPath))}));\n+\n+ fd.reset();\n+ events = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n+ ASSERT_THAT(events, Are({\n+ Event(IN_DELETE_SELF, self_wd),\n+ Event(IN_IGNORED, self_wd),\n+ }));\n}\n// If \"dir/child\" and \"dir/child2\" are links to the same file, and \"dir/child\"\n@@ -1989,10 +2020,6 @@ TEST(Inotify, ExcludeUnlinkInodeEvents_NoRandomSave) {\nconst FileDescriptor fd =\nASSERT_NO_ERRNO_AND_VALUE(Open(file.path().c_str(), O_RDWR));\n- const FileDescriptor inotify_fd =\n- ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n- const int wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n- inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n// NOTE(b/157163751): Create another link before unlinking. 
This is needed for\n// the gofer filesystem in gVisor, where open fds will not work once the link\n@@ -2006,6 +2033,13 @@ TEST(Inotify, ExcludeUnlinkInodeEvents_NoRandomSave) {\nASSERT_THAT(rc, SyscallSucceeds());\n}\n+ const FileDescriptor inotify_fd =\n+ ASSERT_NO_ERRNO_AND_VALUE(InotifyInit1(IN_NONBLOCK));\n+ const int dir_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), dir.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+ const int file_wd = ASSERT_NO_ERRNO_AND_VALUE(InotifyAddWatch(\n+ inotify_fd.get(), file.path(), IN_ALL_EVENTS | IN_EXCL_UNLINK));\n+\n// Even after unlinking, inode-level operations will trigger events regardless\n// of IN_EXCL_UNLINK.\nASSERT_THAT(unlink(file.path().c_str()), SyscallSucceeds());\n@@ -2015,20 +2049,28 @@ TEST(Inotify, ExcludeUnlinkInodeEvents_NoRandomSave) {\nstd::vector<Event> events =\nASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\nEXPECT_THAT(events, Are({\n- Event(IN_DELETE, wd, Basename(file.path())),\n- Event(IN_MODIFY, wd, Basename(file.path())),\n+ Event(IN_ATTRIB, file_wd),\n+ Event(IN_DELETE, dir_wd, Basename(file.path())),\n+ Event(IN_MODIFY, dir_wd, Basename(file.path())),\n+ Event(IN_MODIFY, file_wd),\n}));\nconst struct timeval times[2] = {{1, 0}, {2, 0}};\nASSERT_THAT(futimes(fd.get(), times), SyscallSucceeds());\nevents = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd, Basename(file.path()))}));\n+ EXPECT_THAT(events, Are({\n+ Event(IN_ATTRIB, dir_wd, Basename(file.path())),\n+ Event(IN_ATTRIB, file_wd),\n+ }));\n// S/R is disabled on this entire test due to behavior with unlink; it must\n// also be disabled after this point because of fchmod.\nASSERT_THAT(fchmod(fd.get(), 0777), SyscallSucceeds());\nevents = ASSERT_NO_ERRNO_AND_VALUE(DrainEvents(inotify_fd.get()));\n- EXPECT_THAT(events, Are({Event(IN_ATTRIB, wd, Basename(file.path()))}));\n+ EXPECT_THAT(events, Are({\n+ Event(IN_ATTRIB, dir_wd, Basename(file.path())),\n+ Event(IN_ATTRIB, file_wd),\n+ }));\n}\n// This test helps verify that the lock order of filesystem and inotify locks\n" } ]
Go
Apache License 2.0
google/gvisor
Complete inotify IN_EXCL_UNLINK implementation in VFS2. Events were only skipped on parent directories after their children were unlinked; events on the unlinked file itself need to be skipped as well. As a result, all Watches.Notify() calls need to know whether the dentry where the call came from was unlinked. Updates #1479. PiperOrigin-RevId: 317979476
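A sketch of the IN_EXCL_UNLINK semantics described above, using golang.org/x/sys/unix; the paths are placeholders and error handling is elided.

```go
ifd, _ := unix.InotifyInit1(unix.IN_NONBLOCK)
// Watch both the parent directory and the file itself with IN_EXCL_UNLINK.
unix.InotifyAddWatch(ifd, "/tmp/dir", unix.IN_ALL_EVENTS|unix.IN_EXCL_UNLINK)
unix.InotifyAddWatch(ifd, "/tmp/dir/file", unix.IN_ALL_EVENTS|unix.IN_EXCL_UNLINK)

fd, _ := unix.Open("/tmp/dir/file", unix.O_RDWR, 0)
unix.Unlink("/tmp/dir/file")

// Once the path is unlinked, reads and writes through fd should no longer
// generate events on either watch, while inode-level events such as IN_ATTRIB
// and the final IN_DELETE_SELF are still delivered.
var buf [4]byte
unix.Write(fd, buf[:])
unix.Read(fd, buf[:])
unix.Close(fd)
```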